# This file is part of Cockpit.
#
# Copyright (C) 2021 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.

from testlib import *
from netlib import NetworkHelpers
from storagelib import StorageHelpers
from machinesxmls import *


class VirtualMachinesCaseHelpers:
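    """UI helpers shared by the virtual machine tests.

    The selectors assume the current Machines page markup (PatternFly
    classes such as .pf-c-dropdown__menu and data-row-id attributes).
    """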
    created_pool = False

    def performAction(self, vmName, action, checkExpectedState=True):
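        """Run an action from the VM's kebab menu and, unless
        checkExpectedState is False, wait for the expected resulting state."""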
        b = self.browser
        b.click("#vm-{0}-action-kebab button".format(vmName))
        b.wait_visible("#vm-{0}-action-kebab > .pf-c-dropdown__menu".format(vmName))
        b.click("#vm-{0}-{1} a".format(vmName, action))

        if not checkExpectedState:
            return

        if action == "pause":
            b.wait_in_text("#vm-{0}-state".format(vmName), "Paused")
        if action == "resume" or action == "run":
            b.wait_in_text("#vm-{0}-state".format(vmName), "Running")
        if action == "forceOff":
            b.wait_in_text("#vm-{0}-state".format(vmName), "Shut off")

    def goToVmPage(self, vmName, connectionName='system'):
        # click on the VM's name link in its row
        self.browser.click("tbody tr[data-row-id=vm-{0}-{1}] a.vm-list-item-name".format(vmName, connectionName))

    def goToMainPage(self):
        self.browser.click(".machines-listing-breadcrumb li:first-of-type a")

    def waitVmRow(self, vmName, connectionName='system', present=True):
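        """Wait until the VM's row is present in the overview table,
        or gone from it when called with present=False."""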
        b = self.browser
        vm_row = "tbody tr[data-row-id=vm-{0}-{1}]".format(vmName, connectionName)
        if present:
            b.wait_visible(vm_row)
        else:
            b.wait_not_present(vm_row)
def togglePoolRow(self, poolName, connectionName="system"):
        isExpanded = 'pf-m-expanded' in self.browser.attr("tbody tr[data-row-id=pool-{0}-{1}] + tr".format(poolName, connectionName), "class")
        self.browser.click("tbody tr[data-row-id=pool-{0}-{1}] .pf-c-table__toggle button".format(poolName, connectionName))  # click on the row header
        if isExpanded:
            self.browser.wait_not_present("tbody tr[data-row-id=pool-{0}-{1}] + tr.pf-m-expanded".format(poolName, connectionName))
        else:
            self.browser.wait_visible("tbody tr[data-row-id=pool-{0}-{1}] + tr.pf-m-expanded".format(poolName, connectionName))
def waitPoolRow(self, poolName, connectionName="system", present="true"):
        b = self.browser
        pool_row = "tbody tr[data-row-id=pool-{0}-{1}]".format(poolName, connectionName)
        if present:
            b.wait_visible(pool_row)
        else:
            b.wait_not_present(pool_row)
def toggleNetworkRow(self, networkName, connectionName="system"):
        isExpanded = 'pf-m-expanded' in self.browser.attr("tbody tr[data-row-id=network-{0}-{1}] + tr".format(networkName, connectionName), "class")
        self.browser.click("tbody tr[data-row-id=network-{0}-{1}] .pf-c-table__toggle button".format(networkName, connectionName))  # click on the row header
        if isExpanded:
            self.browser.wait_not_present("tbody tr[data-row-id=network-{0}-{1}] + tr.pf-m-expanded".format(networkName, connectionName))
        else:
            self.browser.wait_visible("tbody tr[data-row-id=network-{0}-{1}] + tr.pf-m-expanded".format(networkName, connectionName))
def waitNetworkRow(self, networkName, connectionName="system", present="true"):
        b = self.browser
        network_row = "tbody tr[data-row-id=network-{0}-{1}]".format(networkName, connectionName)
        if present:
            b.wait_visible(network_row)
        else:
            b.wait_not_present(network_row)

    def startLibvirt(self):
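        """Start libvirtd and wait until it answers queries and the 'default' network is active."""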
        m = self.machine

        # Ensure everything has started correctly
        m.execute("systemctl start libvirtd.service")
        # Wait until we can get a list of domains
        m.execute("until virsh list; do sleep 1; done")
        # Wait for the network 'default' to become active
        m.execute("virsh net-define /etc/libvirt/qemu/networks/default.xml || true")
        m.execute("virsh net-start default || true")
        m.execute(r"until virsh net-info default | grep 'Active:\s*yes'; do sleep 1; done")

    def createVm(self, name, graphics='spice', ptyconsole=False, running=True):
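        """Define (and optionally start) a test VM backed by a cirros image.

        Returns the dict of arguments that was interpolated into DOMAIN_XML."""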
        m = self.machine

        image_file = m.pull("cirros")
        img = "/var/lib/libvirt/images/{0}-2.img".format(name)
        m.upload([image_file], img)
        m.execute("chmod 777 {0}".format(img))

        args = {
            "name": name,
            "image": img,
            "logfile": None,
            "console": "",
        }

        if ptyconsole:
            args["console"] = PTYCONSOLE_XML
        else:
            m.execute("chmod 777 /var/log/libvirt")
            args["logfile"] = "/var/log/libvirt/console-{0}.log".format(name)
            args["console"] = CONSOLE_XML.format(log=args["logfile"])

        if graphics == 'spice':
            cxml = SPICE_XML
        elif graphics == 'vnc':
            cxml = VNC_XML
        elif graphics == 'none':
            cxml = ""
        else:
            assert False, "invalid value for graphics"
        args["graphics"] = cxml.format(**args)

        if not self.created_pool:
            xml = POOL_XML.format(path="/var/lib/libvirt/images")
            m.execute("echo \"{0}\" > /tmp/xml && virsh pool-define /tmp/xml && virsh pool-start images".format(xml))
            self.created_pool = True

        xml = DOMAIN_XML.format(**args)
        m.execute("echo \"{0}\" > /tmp/xml && virsh define /tmp/xml{1}".format(
            xml, " && virsh start {}".format(name) if running else ""))
        m.execute('[ "$(virsh domstate {0})" = {1} ] || {{ virsh dominfo {0} >&2; cat /var/log/libvirt/qemu/{0}.log >&2; exit 1; }}'.format(
            name, "running" if running else "\"shut off\""))

        # TODO check if kernel is booted
        # Ideally we would like to check guest agent event for that
        # Libvirt has a signal for that too: VIR_DOMAIN_EVENT_ID_AGENT_LIFECYCLE
        # https://libvirt.org/git/?p=libvirt-python.git;a=blob;f=examples/guest-vcpus/guest-vcpu-daemon.py;h=30fcb9ce24165c59dec8d9bbe6039f56382e81e3;hb=HEAD
        self.allow_journal_messages('.*denied.*comm="pmsignal".*')

        return args

    def prepareStorageDeviceOnISCSI(self, target_iqn):
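        """Prepare an iSCSI storage device to back a storage pool.

        Returns the system's initiator name (IQN)."""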
        m = self.machine

        # ensure that we generate a /etc/iscsi/initiatorname.iscsi
        m.execute("systemctl start iscsid")

        orig_iqn = m.execute("sed -n '/^InitiatorName=/ { s/^.*=//; p }' /etc/iscsi/initiatorname.iscsi").strip()

        # Increase the iSCSI timeouts for heavy load during our testing
        self.sed_file(r"s|^\(node\..*log.*_timeout = \).*|\1 60|", "/etc/iscsi/iscsid.conf")

        # make sure this gets cleaned up, to avoid reboot hangs (https://bugzilla.redhat.com/show_bug.cgi?id=1817241)
        self.restore_dir("/var/lib/iscsi")

        # Set up an iSCSI target
        m.execute("""
                  targetcli /backstores/ramdisk create test 50M
                  targetcli /iscsi create %(tgt)s
                  targetcli /iscsi/%(tgt)s/tpg1/luns create /backstores/ramdisk/test
                  targetcli /iscsi/%(tgt)s/tpg1/acls create %(ini)s
                  """ % {"tgt": target_iqn, "ini": orig_iqn})
        self.addCleanup(m.execute, "targetcli /backstores/ramdisk delete test && targetcli /iscsi delete %s && (iscsiadm -m node -o delete || true)" % target_iqn)

        return orig_iqn


class VirtualMachinesCase(MachineCase, VirtualMachinesCaseHelpers, StorageHelpers, NetworkHelpers):
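    """Base class for tests of the Machines page.

    A minimal usage sketch (the test and VM names are hypothetical):

        class TestMachinesExample(VirtualMachinesCase):
            def testStartStop(self):
                self.createVm("subVmTest1")
                self.login_and_go("/machines")
                self.waitVmRow("subVmTest1")
                self.performAction("subVmTest1", "forceOff")
    """
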
    def setUp(self):
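        """Start libvirt and register cleanups so every test starts from a pristine libvirt state."""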
        super().setUp()

        m = self.machine

        # Keep the pristine state of libvirt
        self.restore_dir("/var/lib/libvirt")
        self.restore_dir("/etc/libvirt")

        if m.image in ["ubuntu-2004", "ubuntu-stable"]:
            # https://bugs.launchpad.net/ubuntu/+source/libvirt-dbus/+bug/1892757
            m.execute("usermod -a -G libvirt libvirtdbus")

        self.startLibvirt()
        self.addCleanup(m.execute, "systemctl stop libvirtd")

        # Stop all domains
        self.addCleanup(m.execute, "for d in $(virsh list --name); do virsh destroy $d || true; done")

        # Clean up and stop all pools
        self.addCleanup(m.execute, "rm -rf /run/libvirt/storage/*")
        self.addCleanup(m.execute, "for n in $(virsh pool-list --all --name); do virsh pool-destroy $n || true; done")

        # Clean up and stop all networks
        self.addCleanup(m.execute, "rm -rf /run/libvirt/network/test_network*")
        self.addCleanup(m.execute, "for n in $(virsh net-list --all --name); do virsh net-destroy $n || true; done")

        # we don't have configuration to open the firewall for local libvirt machines, so just stop firewalld
        m.execute("systemctl stop firewalld; systemctl try-restart libvirtd")

        # FIXME: report downstream; AppArmor noisily denies some operations, but they are not required for us
        self.allow_journal_messages(r'.* type=1400 .* apparmor="DENIED" operation="capable" profile="\S*libvirtd.* capname="sys_rawio".*')
        # AppArmor doesn't like the non-standard path for our storage pools
        self.allow_journal_messages('.* type=1400 .* apparmor="DENIED" operation="open" profile="virt-aa-helper" name="%s.*' % self.vm_tmpdir)
        if m.image in ["ubuntu-2004", "ubuntu-stable"]:
            self.allow_journal_messages('.* type=1400 .* apparmor="DENIED" operation="open" profile="libvirt.* name="/" .* denied_mask="r" .*')
            self.allow_journal_messages('.* type=1400 .* apparmor="DENIED" operation="open" profile="libvirt.* name="/sys/bus/nd/devices/" .* denied_mask="r" .*')

        # FIXME: testDomainMemorySettings on Fedora 32 reports this; figure out where it comes from.
        # Ignoring it just to unbreak tests for now.
        self.allow_journal_messages("Failed to get COMM: No such process")

        m.execute("virsh net-define /etc/libvirt/qemu/networks/default.xml || true")

        # avoid error noise about resources getting cleaned up
        self.addCleanup(self.browser.logout)