# Regression tests for crmsh bootstrap (cluster init/join/remove)
@bootstrap
Feature: Regression test for bootstrap bugs
The @clean tag means the cluster service needs to be stopped if it is running
Need nodes: hanode1 hanode2 hanode3
# Stage ordering must be enforced: each bootstrap stage (ssh, firewalld,
# csync2, corosync) has to complete before the 'cluster' stage is allowed,
# on both the init node and every joining node.
@clean
Scenario: Stages dependency (bsc#1175865)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
# init side: running the 'cluster' stage early must fail and name
# the first missing prerequisite stage
When Try "crm cluster init cluster -y" on "hanode1"
Then Except "ERROR: cluster.init: Please run 'ssh' stage first"
When Run "crm cluster init ssh -y" on "hanode1"
When Try "crm cluster init cluster -y" on "hanode1"
Then Except "ERROR: cluster.init: Please run 'firewalld' stage first"
When Try "crm cluster init firewalld -y" on "hanode1"
When Try "crm cluster init cluster -y" on "hanode1"
Then Except "ERROR: cluster.init: Please run 'csync2' stage first"
When Run "crm cluster init csync2 -y" on "hanode1"
When Try "crm cluster init cluster -y" on "hanode1"
Then Except "ERROR: cluster.init: Please run 'corosync' stage first"
When Run "crm cluster init corosync -y" on "hanode1"
When Run "crm cluster init cluster -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
# join side: the same stage ordering applies to 'crm cluster join'
When Try "crm cluster join cluster -c hanode1 -y" on "hanode2"
Then Except "ERROR: cluster.join: Please run 'ssh' stage first"
When Try "crm cluster join ssh -c hanode1 -y" on "hanode2"
When Try "crm cluster join cluster -c hanode1 -y" on "hanode2"
Then Except "ERROR: cluster.join: Please run 'firewalld' stage first"
When Try "crm cluster join firewalld -c hanode1 -y" on "hanode2"
When Try "crm cluster join cluster -c hanode1 -y" on "hanode2"
Then Except "ERROR: cluster.join: Please run 'csync2' stage first"
When Try "crm cluster join csync2 -c hanode1 -y" on "hanode2"
When Try "crm cluster join cluster -c hanode1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
# After a plain init/join, the placement-strategy cluster property
# must end up as "default" (it was previously set incorrectly).
@clean
Scenario: Set placement-strategy value as "default"(bsc#1129462)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
And Show cluster status on "hanode1"
When Run "crm cluster join -c hanode1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
And Online nodes are "hanode1 hanode2"
And Show cluster status on "hanode1"
When Run "crm configure get_property placement-strategy" on "hanode1"
Then Got output "default"
# Whitespace-only option values must be rejected with a clear error for
# every bootstrap subcommand that takes a value (cib name, cluster name,
# cluster node, arbitrator).
@clean
Scenario: Empty value not allowed for option(bsc#1141976)
When Try "crm -c ' '"
Then Except "ERROR: Empty value not allowed for dest "cib""
When Try "crm cluster init --name ' '"
Then Except "ERROR: cluster.init: Empty value not allowed for dest "cluster_name""
When Try "crm cluster join -c ' '"
Then Except "ERROR: cluster.join: Empty value not allowed for dest "cluster_node""
When Try "crm cluster remove -c ' '"
Then Except "ERROR: cluster.remove: Empty value not allowed for dest "cluster_node""
When Try "crm cluster geo_init -a ' '"
Then Except "ERROR: cluster.geo_init: Empty value not allowed for dest "arbitrator""
When Try "crm cluster geo_join -c ' '"
Then Except "ERROR: cluster.geo_join: Empty value not allowed for dest "cluster_node""
When Try "crm cluster geo_init_arbitrator -c ' '"
Then Except "ERROR: cluster.geo_init_arbitrator: Empty value not allowed for dest "cluster_node""
# Joining over an interface that cannot reach the seed node must fail
# with a helpful message; joining over the working interface must succeed.
# The iptables DROP rule simulates the broken eth1 path and is removed
# again at the end so later scenarios are unaffected.
@clean
Scenario: Setup cluster with crossed network
Given Cluster service is "stopped" on "hanode1"
Given Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -i eth0 -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
When Run "iptables -A INPUT -i eth1 -s @hanode1.ip.0 -j DROP" on "hanode2"
When Try "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
Then Cluster service is "stopped" on "hanode2"
And Except "Cannot see peer node "hanode1", please check the communication IP" in stderr
When Run "crm cluster join -c hanode1 -i eth0 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
When Run "iptables -D INPUT -i eth1 -s @hanode1.ip.0 -j DROP" on "hanode2"
# 'crm cluster remove' must also drop the removed node's entry from the
# nodelist section of corosync.conf, not just stop its services.
@clean
Scenario: Remove correspond nodelist in corosync.conf while remove(bsc#1165644)
Given Cluster service is "stopped" on "hanode1"
Given Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -i eth1 -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
When Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
# hanode2's ring0 address must be present before the removal ...
When Run "crm corosync get nodelist.node.ring0_addr" on "hanode1"
Then Expected "@hanode2.ip.1" in stdout
#And Service "hawk.service" is "started" on "hanode2"
When Run "crm cluster remove hanode2 -y" on "hanode1"
Then Online nodes are "hanode1"
And Cluster service is "stopped" on "hanode2"
# verify bsc#1175708
#And Service "hawk.service" is "stopped" on "hanode2"
# ... and gone afterwards
When Run "crm corosync get nodelist.node.ring0_addr" on "hanode1"
Then Expected "@hanode2.ip.1" not in stdout
# Two nodes joining at the same time must both succeed, corosync.conf must
# be synced to all members, and two_node must be 0 in a three-node cluster.
@clean
Scenario: Multi nodes join in parallel(bsc#1175976)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
And Cluster service is "stopped" on "hanode3"
When Run "crm cluster init -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
And Show cluster status on "hanode1"
When Run "crm cluster join -c hanode1 -y" on "hanode2,hanode3"
Then Cluster service is "started" on "hanode2"
And Cluster service is "started" on "hanode3"
And Online nodes are "hanode1 hanode2 hanode3"
And Show cluster status on "hanode1"
And File "/etc/corosync/corosync.conf" was synced in cluster
And two_node in corosync.conf is "0"
# A second join must time out with a clear error when the seed node's
# join lock directory is never released (e.g. a joining node hung or died).
@clean
Scenario: Multi nodes join in parallel timed out(bsc#1175976)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
And Cluster service is "stopped" on "hanode3"
When Run "crm cluster init -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
And Show cluster status on "hanode1"
When Run "crm cluster join -c hanode1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
# Try to simulate the join process hanging on hanode2 or hanode2 died
# Just leave the lock directory unremoved
When Run "mkdir /run/.crmsh_lock_directory" on "hanode1"
When Try "crm cluster join -c hanode1 -y" on "hanode3"
Then Except "ERROR: cluster.join: Timed out after 120 seconds. Cannot continue since the lock directory exists at the node (hanode1:/run/.crmsh_lock_directory)"
# clean up so later scenarios can join again
When Run "rm -rf /run/.crmsh_lock_directory" on "hanode1"
# join/remove must work when the peer is addressed by an /etc/hosts
# alias (uppercase) rather than its canonical hostname.
@clean
Scenario: Change host name in /etc/hosts as alias(bsc#1183654)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
When Run "echo '@hanode1.ip.0 HANODE1'|sudo tee -a /etc/hosts" on "hanode1"
When Run "echo '@hanode2.ip.0 HANODE2'|sudo tee -a /etc/hosts" on "hanode2"
When Run "crm cluster init -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
When Run "crm cluster join -c HANODE1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
And Online nodes are "hanode1 hanode2"
When Run "crm cluster remove HANODE2 -y" on "hanode1"
Then Cluster service is "stopped" on "hanode2"
And Online nodes are "hanode1"
# 'crm cluster stop' must work reliably even immediately after start
# (start --all followed by stop --all in one command line), and must
# also stop a bare corosync started outside of crmsh.
@clean
Scenario: Stop service quickly(bsc#1203601)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
When Run "crm cluster join -c hanode1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
When Run "crm cluster stop --all" on "hanode1"
Then Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
# stop issued right after start, with no settle time in between
When Run "crm cluster start --all;sudo crm cluster stop --all" on "hanode1"
Then Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
# corosync started directly via systemd must still be stoppable by crmsh
When Run "systemctl start corosync" on "hanode1"
Then Service "corosync" is "started" on "hanode1"
When Run "crm cluster stop" on "hanode1"
Then Service "corosync" is "stopped" on "hanode1"
# 'crm cluster start --all' must succeed even when the CIB files were
# deleted on all nodes; starting an unknown node must fail clearly.
@clean
Scenario: Can't start cluster with --all option if no cib(bsc#1219052)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
When Run "crm cluster join -c hanode1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
And Online nodes are "hanode1 hanode2"
When Run "crm cluster stop --all" on "hanode1"
Then Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
# wipe the CIB on both nodes, then start the whole cluster again
When Run "rm -f /var/lib/pacemaker/cib/*" on "hanode1"
When Run "rm -f /var/lib/pacemaker/cib/*" on "hanode2"
And Run "crm cluster start --all" on "hanode1"
Then Cluster service is "started" on "hanode1"
Then Cluster service is "started" on "hanode2"
When Try "crm cluster start xxx"
Then Except "ERROR: cluster.start: Node 'xxx' is not a member of the cluster"
# 'crm cluster stop --all' must stop the remaining nodes even when the
# local node's own cluster service is already down.
@clean
Scenario: Can't stop all nodes' cluster service when local node's service is down(bsc#1213889)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
When Run "crm cluster join -c hanode1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
When Wait for DC
# stop only the local node first, then ask for --all from that node
And Run "crm cluster stop" on "hanode1"
And Run "crm cluster stop --all" on "hanode1"
Then Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
# init/join run via 'sudo' by a sudoer (alice) must work even when the
# sudoer has no pre-existing ssh keys on either node.
@skip_non_root
@clean
Scenario: crm cluster join default behavior change in ssh key handling (bsc#1210693)
Given Cluster service is "stopped" on "hanode1"
Given Cluster service is "stopped" on "hanode2"
When Run "rm -rf /home/alice/.ssh" on "hanode1"
When Run "rm -rf /home/alice/.ssh" on "hanode2"
When Run "su - alice -c "sudo crm cluster init -y"" on "hanode1"
Then Cluster service is "started" on "hanode1"
When Run "su - alice -c "sudo crm cluster join -c hanode1 -y"" on "hanode2"
Then Cluster service is "started" on "hanode2"
# A freshly created passwordless sudoer (xin) with no crmsh config must
# still be able to run 'crm cluster run' across all nodes, relying on
# root's passwordless ssh rather than per-user keys.
@skip_non_root
@clean
Scenario: Passwordless for root, not for sudoer(bsc#1209193)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
When Run "crm cluster join -c hanode1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
# create the sudoer and drop crmsh's cached core.hosts config on both nodes
When Run "useradd -m -s /bin/bash xin" on "hanode1"
When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode1"
When Run "rm -f /root/.config/crm/crm.conf" on "hanode1"
When Run "useradd -m -s /bin/bash xin" on "hanode2"
When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode2"
When Run "rm -f /root/.config/crm/crm.conf" on "hanode2"
# the cluster-wide command must have run on both nodes
When Run "su xin -c "sudo crm cluster run 'touch /tmp/1209193'"" on "hanode1"
And Run "test -f /tmp/1209193" on "hanode1"
And Run "test -f /tmp/1209193" on "hanode2"
# Join must still succeed when the seed node's root public key file
# (id_rsa.pub) has been removed after init.
@skip_non_root
@clean
Scenario: Missing public key
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -y" on "hanode1"
And Run "rm -f /root/.ssh/id_rsa.pub" on "hanode1"
Then Cluster service is "started" on "hanode1"
When Run "crm cluster join -c hanode1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
# When the upgrade sequence file is missing but preconditions for the
# upgrade are not met (no crm.conf, or no ssh keys), 'crm status' must
# still work instead of failing on the skipped upgrade.
@skip_non_root
@clean
Scenario: Skip upgrade when preconditions are not satisfied
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -y" on "hanode1"
Then Cluster service is "started" on "hanode1"
When Run "crm cluster join -c hanode1 -y" on "hanode2"
Then Cluster service is "started" on "hanode2"
# precondition broken: crm.conf temporarily moved away
When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
And Run "mv /root/.config/crm/crm.conf{,.bak}" on "hanode1"
Then Run "crm status" OK on "hanode1"
# precondition broken: root's ssh directory temporarily moved away
When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
And Run "mv /root/.config/crm/crm.conf{.bak,}" on "hanode1"
And Run "mv /root/.ssh{,.bak}" on "hanode1"
Then Run "crm status" OK on "hanode1"
And Run "rm -rf /root/.ssh && mv /root/.ssh{.bak,}" OK on "hanode1"
# skip non-root as behave_agent is not able to run commands interactively with non-root sudoer
@skip_non_root
@clean
# ~hacluster/.ssh/authorized_keys must be owned by hacluster:haclient on
# every node after a fresh init/join (not root-owned).
Scenario: Owner and permission of file authorized_keys (bsc#1217279)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
# in a newly created cluster
When Run "crm cluster init -y" on "hanode1"
And Run "crm cluster join -c hanode1 -y" on "hanode2"
Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
And Expected "hacluster:haclient" in stdout
And Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
And Expected "hacluster:haclient" in stdout
# A legacy no-quorum-policy=ignore set on the seed node must be removed
# automatically when a second node joins the cluster.
@clean
Scenario: Ditch no-quorum-policy=ignore when joining
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -y" on "hanode1"
And Run "crm configure property no-quorum-policy=ignore" on "hanode1"
And Run "crm configure show" on "hanode1"
Then Expected "no-quorum-policy=ignore" in stdout
When Run "crm cluster join -c hanode1 -y" on "hanode2"
And Run "crm configure show" on "hanode1"
Then Expected "no-quorum-policy=ignore" not in stdout
# A third node must be able to join even after the crmsh user config
# (which holds core.hosts) was deleted on the existing members, and
# 'stop --all' from the new node must stop the whole cluster.
@clean
Scenario: Join when `core.hosts` is not available from the seed node (bsc#1245343)
Given Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
When Run "crm cluster init -y" on "hanode1"
And Run "crm cluster join -c hanode1 -y" on "hanode2"
And Run "rm -r /root/.config/crm" on "hanode1,hanode2"
And Run "crm cluster join -c hanode1 -y" on "hanode3"
Then Cluster service is "started" on "hanode3"
When Run "crm cluster stop --all" on "hanode3"
Then Cluster service is "stopped" on "hanode1"
And Cluster service is "stopped" on "hanode2"
And Cluster service is "stopped" on "hanode3"
# end of feature file