Skip to content

lib/vdi: Add VDI resize support and integrate into VM & linstor operations #273

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 7 commits into
base: master
Choose a base branch
from
37 changes: 36 additions & 1 deletion conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -307,7 +307,7 @@ def sr_disk_for_all_hosts(pytestconfig, request, host):
pytest.fail("This test requires exactly one --sr-disk parameter")
disk = disks[0]
master_disks = host.available_disks()
assert len(master_disks) > 0, "a free disk device is required on the master host"
assert len(master_disks) > 0, "A free disk device is required on the master host"

if disk != "auto":
assert disk in master_disks, \
Expand All @@ -329,6 +329,41 @@ def sr_disk_for_all_hosts(pytestconfig, request, host):
logging.info(f">> Disk or block device {disk} is present and free on all pool members")
yield candidates[0]

@pytest.fixture(scope='session')
def sr_disks_for_all_hosts(pytestconfig, request, host):
    """Yield the list of disk names that are free on every host of the pool.

    Driven by one or more --sr-disk parameters. Passing "auto" selects all
    disks that are free everywhere; otherwise each named disk must be present
    and free on the master and on every other pool member.
    """
    disks = pytestconfig.getoption("sr_disk")
    assert len(disks) > 0, "This test requires at least one --sr-disk parameter"
    # Start from what the master host can offer.
    master_disks = host.available_disks()
    assert len(master_disks) > 0, "A free disk device is required on the master host"

    auto_mode = "auto" in disks
    if not auto_mode:
        # Every explicitly requested disk must be free on the master.
        for disk in disks:
            assert disk in master_disks, \
                f"Disk or block device {disk} is either not present or already used on the master host"
        master_disks = [disk for disk in disks if disk in master_disks]

    # Keep only disks that are also free on every other pool member.
    candidates = list(master_disks)
    for member in host.pool.hosts[1:]:
        free_on_member = set(member.available_disks())
        candidates = [d for d in candidates if d in free_on_member]

    if auto_mode:
        assert len(candidates) > 0, \
            f"Free disk devices are required on all pool members. Pool master has: {' '.join(master_disks)}."
        logging.info(f">> Found free disk device(s) on all pool hosts: {' '.join(candidates)}. "
                     f"Using: {', '.join(candidates)}")
    else:
        assert len(candidates) == len(disks), \
            f"Some specified disks ({', '.join(disks)}) are not free or available on all hosts."
        logging.info(f">> Disk(s) {', '.join(candidates)} are present and free on all pool members")
    yield candidates

@pytest.fixture(scope='module')
def vm_ref(request):
ref = request.param
Expand Down
4 changes: 4 additions & 0 deletions lib/vdi.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@ def clone(self):
def readonly(self):
    """Return True when the VDI's xe `read-only` parameter is the string "true"."""
    return self.param_get("read-only") == "true"

def resize(self, vdi_size):
    """Resize this VDI by running `xe vdi-resize` on the pool master.

    `vdi_size` is passed through as the `disk-size` argument; int or str
    both appear acceptable since xe arguments are stringified — presumably
    a size in bytes, TODO confirm against `xe vdi-resize` semantics.
    """
    logging.info("Resize VDI %s", self.uuid)
    self.sr.pool.master.xe('vdi-resize', {'uuid': self.uuid, 'disk-size': vdi_size})

def __str__(self):
    # Human-readable identification for logs and assertion messages.
    return f"VDI {self.uuid} on SR {self.sr.uuid}"

Expand Down
22 changes: 22 additions & 0 deletions lib/vm.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from lib.common import PackageManagerEnum, parse_xe_dict, safe_split, strtobool, wait_for, wait_for_not
from lib.snapshot import Snapshot
from lib.vif import VIF
from lib.vdi import VDI

class VM(BaseVM):
def __init__(self, uuid, host):
Expand Down Expand Up @@ -130,6 +131,9 @@ def wait_for_vm_running_and_ssh_up(self):
self.wait_for_os_booted()
wait_for(self.is_ssh_up, "Wait for SSH up")

def wait_for_vm_running(self):
    """Block until the VM reports it is running."""
    # Pass a description so wait_for can log progress, consistent with
    # the other wait_for(...) calls in this class (e.g. "Wait for SSH up").
    wait_for(self.is_running, "Wait for VM running")

def ssh_touch_file(self, filepath):
logging.info("Create file on VM (%s)" % filepath)
self.ssh(['touch', filepath])
Expand Down Expand Up @@ -361,6 +365,24 @@ def test_snapshot_on_running_vm(self):
finally:
snapshot.destroy(verify=True)

def vdi_resize(self, vdi_uuid, new_size=None):
    """Resize one of this VM's VDIs and verify the new virtual size.

    :param vdi_uuid: UUID of the VDI to resize.
    :param new_size: target virtual size in bytes (int); when None, the
        current virtual size is doubled. Sizes should stay within the VHD
        limits, for reference:
        MAX_VHD_SIZE = 2088960 * 1024 * 1024  # 2088960 MB
        MIN_VHD_SIZE = 1 * 1024 * 1024        # 1 MB
    :raises AssertionError: if the VM is running, or if the VDI does not
        report the requested size after the resize.
    """
    # This assert may be removed in the future if online VDI resize
    # becomes supported.
    assert not self.is_running(), "Expected halted, got running"

    vdi = VDI(vdi_uuid, sr=self.get_sr())
    if new_size is None:
        # Default: double the current size (kept as int — xe stringifies args).
        new_size = int(vdi.param_get("virtual-size")) * 2
    vdi.resize(new_size)
    resized_vdi_size = int(vdi.param_get("virtual-size"))
    assert resized_vdi_size == int(new_size), \
        f"Expected {new_size}, got {resized_vdi_size}"

def get_messages(self, name):
args = {
'obj-uuid': self.uuid,
Expand Down
72 changes: 50 additions & 22 deletions tests/storage/linstor/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,57 +12,85 @@
LINSTOR_PACKAGE = 'xcp-ng-linstor'

@pytest.fixture(scope='package')
def lvm_disks(host, sr_disks_for_all_hosts, provisioning_type):
    """Create PVs and a shared VG on every pool host; yield the device paths.

    For thin provisioning, also carve the whole VG into a thin pool.
    Tears everything down (VG and PVs) on every host afterwards.
    """
    devices = [f"/dev/{disk}" for disk in sr_disks_for_all_hosts]
    hosts = host.pool.hosts

    # Loop variable renamed so it no longer shadows the `host` fixture param.
    for h in hosts:
        for device in devices:
            try:
                h.ssh(['pvcreate', '-ff', '-y', device])
            except commands.SSHCommandFailed as e:
                if e.stdout.endswith('Mounted filesystem?'):
                    # Leftover VG from a previous run: remove it and retry.
                    h.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
                    h.ssh(['pvcreate', '-ff', '-y', device])
                elif e.stdout.endswith('excluded by a filter.'):
                    # Stale on-disk signatures: wipe them and retry.
                    h.ssh(['wipefs', '-a', device])
                    h.ssh(['pvcreate', '-ff', '-y', device])
                else:
                    raise
        h.ssh(['vgcreate', GROUP_NAME] + devices)
        if provisioning_type == 'thin':
            h.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])

    yield devices

    for h in hosts:
        h.ssh(['vgremove', '-f', GROUP_NAME])
        for device in devices:
            h.ssh(['pvremove', device])

@pytest.fixture(scope="package")
def storage_pool_name(provisioning_type):
return GROUP_NAME if provisioning_type == "thick" else STORAGE_POOL_NAME

@pytest.fixture(params=["thin", "thick"], scope="session")
def provisioning_type(request):
    # Parametrizes the whole session over both LINSTOR provisioning modes.
    return request.param

@pytest.fixture(scope='package')
def pool_with_linstor(hostA2, lvm_disks, pool_with_saved_yum_state):
    """Install the LINSTOR packages on every pool host in parallel; yield the pool.

    Removes python-linstor from every host on teardown.
    """
    import concurrent.futures
    pool = pool_with_saved_yum_state

    def is_linstor_installed(host):
        # Sanity check: the package must NOT be present before the tests.
        if host.is_package_installed(LINSTOR_PACKAGE):
            raise Exception(
                f'{LINSTOR_PACKAGE} is already installed on host {host}. This should not be the case.'
            )

    # NOTE: executor.map is lazy — exceptions raised in workers only surface
    # when the results are iterated, so wrap each map() in list() to make
    # worker failures propagate instead of being silently dropped.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        list(executor.map(is_linstor_installed, pool.hosts))

    def install_linstor(host):
        logging.info(f"Installing {LINSTOR_PACKAGE} on host {host}...")
        host.yum_install([LINSTOR_RELEASE_PACKAGE])
        host.yum_install([LINSTOR_PACKAGE], enablerepo="xcp-ng-linstor-testing")
        # Needed because the linstor driver is not in the xapi sm-plugins list
        # before installing the LINSTOR packages.
        host.ssh(["systemctl", "restart", "multipathd"])
        host.restart_toolstack(verify=True)

    with concurrent.futures.ThreadPoolExecutor() as executor:
        list(executor.map(install_linstor, pool.hosts))

    yield pool

    def remove_linstor(host):
        logging.info(f"Cleaning up python-linstor from host {host}...")
        host.yum_remove(["python-linstor"])

    with concurrent.futures.ThreadPoolExecutor() as executor:
        list(executor.map(remove_linstor, pool.hosts))

@pytest.fixture(scope='package')
def linstor_sr(pool_with_linstor, provisioning_type, storage_pool_name):
    """Create a shared LINSTOR SR for the requested provisioning type; destroy it afterwards."""
    sr = pool_with_linstor.master.sr_create('linstor', 'LINSTOR-SR-test', {
        'group-name': storage_pool_name,
        # Replicate on up to 3 hosts, capped by the pool size.
        'redundancy': str(min(len(pool_with_linstor.hosts), 3)),
        'provisioning': provisioning_type
    }, shared=True)
    yield sr
    sr.destroy()
Expand Down
31 changes: 21 additions & 10 deletions tests/storage/linstor/test_linstor_sr.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,13 @@
import pytest
import time

from .conftest import STORAGE_POOL_NAME, LINSTOR_PACKAGE
from .conftest import LINSTOR_PACKAGE
from lib.commands import SSHCommandFailed
from lib.common import wait_for, vm_image
from tests.storage import vdi_is_open

# Requirements:
# - one XCP-ng host >= 8.2 with an additional unused disk for the SR
# - two or more XCP-ng hosts >= 8.2 with additional unused disk(s) for the SR
# - access to XCP-ng RPM repository from the host

class TestLinstorSRCreateDestroy:
Expand All @@ -18,15 +18,15 @@ class TestLinstorSRCreateDestroy:
and VM import.
"""

def test_create_sr_without_linstor(self, host, lvm_disk):
def test_create_sr_without_linstor(self, host, lvm_disks, provisioning_type, storage_pool_name):
# This test must be the first in the series in this module
assert not host.is_package_installed('python-linstor'), \
"linstor must not be installed on the host at the beginning of the tests"
try:
sr = host.sr_create('linstor', 'LINSTOR-SR-test', {
'group-name': STORAGE_POOL_NAME,
'group-name': storage_pool_name,
'redundancy': '1',
'provisioning': 'thin'
'provisioning': provisioning_type
}, shared=True)
try:
sr.destroy()
Expand All @@ -36,13 +36,13 @@ def test_create_sr_without_linstor(self, host, lvm_disk):
except SSHCommandFailed as e:
logging.info("SR creation failed, as expected: {}".format(e))

def test_create_and_destroy_sr(self, pool_with_linstor):
def test_create_and_destroy_sr(self, pool_with_linstor, provisioning_type, storage_pool_name):
# Create and destroy tested in the same test to leave the host as unchanged as possible
master = pool_with_linstor.master
sr = master.sr_create('linstor', 'LINSTOR-SR-test', {
'group-name': STORAGE_POOL_NAME,
'group-name': storage_pool_name,
'redundancy': '1',
'provisioning': 'thin'
'provisioning': provisioning_type
}, shared=True)
# import a VM in order to detect vm import issues here rather than in the vm_on_linstor_sr fixture used in
# the next tests, because errors in fixtures break teardown
Expand Down Expand Up @@ -78,6 +78,15 @@ def test_snapshot(self, vm_on_linstor_sr):
finally:
vm.shutdown(verify=True)

@pytest.mark.small_vm
@pytest.mark.big_vm
def test_resize_vdi(self, vm_on_linstor_sr):
    """Resize every VDI of the VM (default: double the size) and verify it."""
    vm = vm_on_linstor_sr
    # Plain log messages: decorative "*" markers dropped per review feedback.
    logging.info("VDI resize started")
    for vdi_uuid in vm.vdi_uuids():
        vm.vdi_resize(vdi_uuid)
    logging.info("VDI resize completed")

# *** tests with reboots (longer tests).

@pytest.mark.reboot
Expand Down Expand Up @@ -147,7 +156,7 @@ def _ensure_resource_remain_diskless(host, controller_option, volume_name, diskl

class TestLinstorDisklessResource:
@pytest.mark.small_vm
def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr, storage_pool_name):
vm = vm_on_linstor_sr
vdi_uuids = vm.vdi_uuids(sr_uuid=linstor_sr.uuid)
vdi_uuid = vdi_uuids[0]
Expand All @@ -157,10 +166,12 @@ def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
for member in host.pool.hosts:
controller_option += f"{member.hostname_or_ip},"

sr_group_name = "xcp-sr-" + storage_pool_name.replace("/", "_")

# Get volume name from VDI uuid
# "xcp/volume/{vdi_uuid}/volume-name": "{volume_name}"
output = host.ssh([
"linstor-kv-tool", "--dump-volumes", "-g", "xcp-sr-linstor_group_thin_device",
"linstor-kv-tool", "--dump-volumes", "-g", sr_group_name,
"|", "grep", "volume-name", "|", "grep", vdi_uuid
])
volume_name = output.split(': ')[1].split('"')[1]
Expand Down