Browse Source

Add storage pool support

LXD has grown support for multiple storage pools,
defined by name and type (btrfs, zfs, lvm, etc.),
and we would like to support this in nova-lxd.

Closes-Bug: 1692962

Change-Id: I702d1600fdf70bfd1e2402e3455dd868e25214c0
(cherry picked from commit d883fa99eb84cf5f24b8acdb94600ac25629edc3)
tags/15.0.2^0
Chris MacNaughton 2 years ago
parent
commit
6e57a5a2d2
4 changed files with 58 additions and 3 deletions
  1. +1
    -0
      nova/tests/unit/virt/lxd/test_driver.py
  2. +34
    -0
      nova/tests/unit/virt/lxd/test_flavor.py
  3. +3
    -0
      nova/virt/lxd/driver.py
  4. +20
    -3
      nova/virt/lxd/flavor.py

+ 1
- 0
nova/tests/unit/virt/lxd/test_driver.py View File

@@ -128,6 +128,7 @@ class LXDDriverTest(test.NoDBTestCase):
self.patchers.append(CONF2_patcher)
self.CONF2 = CONF2_patcher.start()
self.CONF2.lxd.root_dir = '/lxd'
self.CONF2.lxd.pool = None
self.CONF2.instances_path = '/i'

# LXDDriver._after_reboot reads from the database and syncs container

+ 34
- 0
nova/tests/unit/virt/lxd/test_flavor.py View File

@@ -41,6 +41,11 @@ class ToProfileTest(test.NoDBTestCase):
self.CONF = CONF_patcher.start()
self.CONF.instances_path = '/i'

CONF_patcher = mock.patch('nova.virt.lxd.flavor.CONF')
self.patchers.append(CONF_patcher)
self.CONF2 = CONF_patcher.start()
self.CONF2.lxd.pool = None

def tearDown(self):
super(ToProfileTest, self).tearDown()
for patcher in self.patchers:
@@ -104,6 +109,35 @@ class ToProfileTest(test.NoDBTestCase):
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)

def test_storage_pools(self):
self.client.host_info['api_extensions'].append('storage')
self.CONF2.lxd.pool = 'test_pool'
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name))
}
expected_devices = {
'root': {
'path': '/',
'type': 'disk',
'pool': 'test_pool',
'size': '0GB'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)

self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)

def test_to_profile_security(self):
self.client.host_info['api_extensions'].append('id_map')


+ 3
- 0
nova/virt/lxd/driver.py View File

@@ -72,6 +72,9 @@ lxd_opts = [
cfg.StrOpt('root_dir',
default='/var/lib/lxd/',
help='Default LXD directory'),
cfg.StrOpt('pool',
default=None,
help='LXD Storage pool to use with LXD >= 2.9'),
cfg.IntOpt('timeout',
default=-1,
help='Default LXD timeout'),

+ 20
- 3
nova/virt/lxd/flavor.py View File

@@ -17,12 +17,14 @@ import os
from nova import exception
from nova import i18n
from nova.virt import driver
from oslo_config import cfg
from oslo_utils import units

from nova.virt.lxd import common
from nova.virt.lxd import vif

_ = i18n._
CONF = cfg.CONF


def _base_config(instance, _):
@@ -82,7 +84,7 @@ def _root(instance, client, *_):
device = {'type': 'disk', 'path': '/'}

environment = client.host_info['environment']
if environment['storage'] in ['btrfs', 'zfs']:
if environment['storage'] in ['btrfs', 'zfs'] or CONF.lxd.pool:
device['size'] = '{}GB'.format(instance.root_gb)

specs = instance.flavor.extra_specs
@@ -111,10 +113,17 @@ def _root(instance, client, *_):
if specs.get('quota:disk_total_bytes_sec') and not minor_quota_defined:
device['limits.max'] = '{}MB'.format(
int(specs['quota:disk_total_bytes_sec']) / units.Mi)
if CONF.lxd.pool:
extensions = client.host_info.get('api_extensions', [])
if 'storage' in extensions:
device['pool'] = CONF.lxd.pool
else:
msg = _('Host does not have storage pool support')
raise exception.NovaException(msg)
return {'root': device}


def _ephemeral_storage(instance, _, __, block_info):
def _ephemeral_storage(instance, client, __, block_info):
instance_attributes = common.InstanceAttributes(instance)
ephemeral_storage = driver.block_device_info_get_ephemerals(block_info)
if ephemeral_storage:
@@ -123,11 +132,19 @@ def _ephemeral_storage(instance, _, __, block_info):
ephemeral_src = os.path.join(
instance_attributes.storage_path,
ephemeral['virtual_name'])
devices[ephemeral['virtual_name']] = {
device = {
'path': '/mnt',
'source': ephemeral_src,
'type': 'disk',
}
if CONF.lxd.pool:
extensions = client.host_info.get('api_extensions', [])
if 'storage' in extensions:
device['pool'] = CONF.lxd.pool
else:
msg = _('Host does not have storage pool support')
raise exception.NovaException(msg)
devices[ephemeral['virtual_name']] = device
return devices



Loading…
Cancel
Save