final code refactor

Signed-off-by: Chuck Short <chuck.short@canonical.com>
Chuck Short 2015-06-22 15:18:48 -04:00
parent b1c55ad755
commit 3d4d890f71
30 changed files with 907 additions and 2262 deletions

.gitignore
View File

@@ -1,4 +1,5 @@
*.py[cod]
*.idea
# C extensions
*.so
@@ -50,4 +51,4 @@ ChangeLog
# Editors
*~
.*.swp
.*sw?

View File

@@ -21,9 +21,9 @@ function configure_lxd {
apt_get update
sudo apt-add-repository -y ppa:ubuntu-lxc/lxc-git-master
sudo apt-add-repository -y ppa:ubuntu-lxc/lxd-git-master
# sudo apt-add-repository -y ppa:ubuntu-lxc/lxd-git-master
apt_get update
install_package lxd lxc-dev lxd
install_package lxc lxc-dev
}
function install_lxd {

View File

@@ -1,346 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pwd
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from nova.i18n import _, _LE, _LI, _LW
from nova.compute import power_state
from nova import exception
from nova import utils
import image
import profile
import vif
import container_utils
CONF = cfg.CONF
CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver')
CONF.import_opt('vif_plugging_is_fatal', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
MAX_CONSOLE_BYTES = 100 * units.Ki
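# Map LXD's container status strings onto nova power states.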
LXD_POWER_STATES = {
'RUNNING': power_state.RUNNING,
'STOPPED': power_state.SHUTDOWN,
'STARTING': power_state.NOSTATE,
'STOPPING': power_state.SHUTDOWN,
'ABORTING': power_state.CRASHED,
'FREEZING': power_state.PAUSED,
'FROZEN': power_state.SUSPENDED,
'THAWED': power_state.PAUSED,
'PENDING': power_state.NOSTATE,
'UNKNOWN': power_state.NOSTATE
}
class Container(object):
def __init__(self, lxd, virtapi, firewall):
self.lxd = lxd
self.virtapi = virtapi
self.firewall_driver = firewall
self.image_driver = image.load_driver(CONF.lxd.lxd_image_type,
self.lxd)
self.profile = profile.LXDProfile(self.lxd)
self.vif_driver = vif.LXDGenericDriver()
def container_rebuild(self, context, instance, image_meta, injected_files,
admin_password, bdms, detach_block_devices,
attach_block_devices, network_info, recreate,
block_device_info,
preserve_ephemeral):
raise NotImplementedError()
def container_start(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
try:
LOG.info(_LI('Starting container'), instance=instance)
if self.lxd.container_defined(instance.uuid):
raise exception.InstanceExists(name=instance.uuid)
self.image_driver.setup_container(context, instance, image_meta)
self.profile.profile_create(instance, network_info)
self._setup_container(instance)
self._start_container(instance, network_info)
except Exception:
with excutils.save_and_reraise_exception():
self.container_destroy(context, instance, network_info,
block_device_info, destroy_disks=None,
migrate_data=None)
def container_destroy(self, context, instance, network_info,
block_device_info, destroy_disks, migrate_data):
LOG.info(_LI('Destroying container'))
try:
if not self.lxd.container_defined(instance.uuid):
return
self.lxd.container_destroy(instance.uuid)
self.container_cleanup(context, instance, network_info,
block_device_info, destroy_disks=None,
migrate_data=None)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unable to destroy instance: %s') % ex)
def container_reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
try:
if not self.lxd.container_defined(instance.uuid):
msg = _('Container does not exist')
raise exception.NovaException(msg)
return self.lxd.container_reboot(instance.uuid, 20)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unable to reboot instance: %s') % ex)
def get_console_output(self, context, instance):
try:
if not self.lxd.container_defined(instance.uuid):
msg = _('Container does not exist')
raise exception.NovaException(msg)
console_log = container_utils.get_console_path(instance)
uid = pwd.getpwuid(os.getuid()).pw_uid
utils.execute('chown', '%s:%s' % (uid, uid),
console_log, run_as_root=True)
utils.execute('chmod', '755',
container_utils.get_container_dir(instance),
run_as_root=True)
with open(console_log, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
return log_data
except Exception as ex:
LOG.exception(_LE('Failed to get console log: %s') % ex)
return ""
def container_cleanup(self, context, instance, network_info,
block_device_info, destroy_disks, migrate_data,
destroy_vifs=True):
LOG.info(_LI('Cleaning up container'))
try:
self.profile.profile_delete(instance)
self.unplug_vifs(instance, network_info)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unable to clean up instance: %s') % ex)
def container_state(self, instance):
try:
container_state = self.lxd.container_state(instance.uuid)
state = LXD_POWER_STATES[container_state]
except Exception:
state = power_state.NOSTATE
return state
def container_pause(self, instance):
raise NotImplementedError()
def container_unpause(self, instance):
raise NotImplementedError()
def container_suspend(self, context, instance):
try:
if not self.lxd.container_defined(instance.uuid):
msg = _("Container is not defined")
raise exception.NovaException(msg)
self.lxd.container_suspend(instance.uuid, 20)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Unable to suspend container"))
def container_resume(self, context, instance, network_info,
block_device_info=None):
try:
if not self.lxd.container_defined(instance.uuid):
msg = _('Container does not exist.')
raise exception.NovaException(msg)
self.lxd.container_resume(instance.uuid, 20)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Unable to resume container"))
def container_rescue(self, context, instance, network_info, image_meta,
rescue_password):
raise NotImplementedError()
def container_unrescue(self, instance, network_info):
raise NotImplementedError()
def container_power_off(self, instance, timeout=0, retry_interval=0):
try:
if not self.lxd.container_defined(instance.uuid):
msg = _('Container is not defined')
raise exception.NovaException(msg)
self.lxd.container_stop(instance.uuid, 20)
except Exception:
with excutils.save_and_reraise_exception():
LOG.execption(_LE("Unable to power off container"))
raise NotImplementedError()
def container_power_on(self, context, instance, network_info,
block_device_info):
try:
if not self.lxd.container_defined(instance.uuid):
msg = _('Container is not defined')
raise exception.NovaException(msg)
self.lxd.container_start(instance.uuid, 20)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Unable to power on conatainer"))
def container_soft_delete(self, instance):
raise NotImplementedError()
def container_restore(self, instance):
raise NotImplementedError()
def container_get_resource(self, nodename):
raise NotImplementedError()
def container_inject_file(self, instance, b64_path, b64_contents):
raise NotImplementedError()
def container_inject_network_info(self, instance, nw_info):
pass
def container_poll_rebooting_instances(self, timeout, instances):
raise NotImplementedError()
def container_attach_interface(self, instance, image_meta, vif):
raise NotImplementedError()
def container_detach_interface(self, instance, vif):
raise NotImplementedError()
def container_snapshot(self, context, instance, image_id,
update_task_state):
raise NotImplementedError()
def post_interrupted_snapshot_cleanup(self, context, instance):
pass
def container_quiesce(self, context, instance, image_meta):
raise NotImplementedError()
def container_unquiesce(self, context, instance, image_meta):
raise NotImplementedError()
def _setup_container(self, instance):
LOG.debug('Setting up container')
if not os.path.exists(
container_utils.get_container_image(instance)):
msg = _("Container image doesn't exist.")
raise exception.NovaException(msg)
if instance.uuid:
container = {}
container['name'] = instance.uuid
container['profiles'] = ['%s' % instance.uuid]
container['source'] = {
'type': 'image',
'alias': instance.image_ref
}
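# container_init is asynchronous: LXD returns an operation URL such as
# /1.0/operations/<id>, so extract the id and wait for it to finish.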
(state, data) = self.lxd.container_init(container)
self._wait_for_container(data.get('operation').split('/')[3])
def _start_container(self, instance, network_info):
timeout = CONF.vif_plugging_timeout
# check to see if neutron is ready before
# doing anything else
if (not self.lxd.container_running(instance.uuid) and
utils.is_neutron() and timeout):
events = self._get_neutron_events(network_info)
else:
events = {}
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
except exception.VirtualInterfaceCreateException:
LOG.info(_LW('Failed to connect networking to instance'))
(state, data) = self.lxd.container_start(instance.uuid, 20)
self._wait_for_container(data.get('operation').split('/')[3])
def _destroy_container(self, context, instance, network_info,
block_device_info,
destroy_disks, migrate_data):
if not self.lxd.container_defined(instance.uuid):
msg = _('Unable to find container')
raise exception.NovaException(msg)
self.lxd.container_destroy(instance.uuid)
def plug_vifs(self, instance, network_info):
for _vif in network_info:
self.vif_driver.plug(instance, _vif)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self.firewall_driver.apply_instance_filter(instance, network_info)
def unplug_vifs(self, instance, network_info):
for _vif in network_info:
self.vif_driver.unplug(instance, _vif)
self.firewall_driver.unfilter_instance(instance, network_info)
def _wait_for_container(self, oid):
if not oid:
msg = _('Unable to determine container operation')
raise exception.NovaException(msg)
if not self.lxd.wait_container_operation(oid, 200, 20):
msg = _('Container creation timed out')
raise exception.NovaException(msg)
def _get_neutron_events(self, network_info):
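# Neutron emits a network-vif-plugged event once a VIF becomes active;
# only wait on the VIFs that are not active yet.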
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event '
'%(event)s for instance %(uuid)s'),
{'event': event_name, 'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()

View File

@@ -0,0 +1,139 @@
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from pylxd import api
from pylxd import exceptions as lxd_exceptions
from nova.i18n import _
from nova import exception
import container_image
import container_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class LXDContainerConfig(object):
def __init__(self):
self.lxd = api.API()
self.container_dir = container_utils.LXDContainerDirectories()
self.container_utils = container_utils.LXDContainerUtils()
self.container_image = container_image.LXDContainerImage()
def create_container(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
LOG.debug('Creating container')
self.create_container_profile(instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
container_config = self.create_container_config(context, instance, image_meta,
injected_files, admin_password,
network_info, block_device_info)
return container_config
def create_container_profile(self, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
LOG.debug('Creating profile config')
container_profile = {'name': instance.uuid}
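# raw.lxc passes configuration straight through to LXC; use it to log
# the container console to a per-instance file.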
self.add_value_to_config(container_profile, 'config',
{'raw.lxc':
'lxc.console.logfile = %s\n'
% self.container_dir.get_console_path(
instance)})
if network_info:
self.add_value_to_config(container_profile, 'devices',
self._get_network_devices(instance, network_info))
try:
self.lxd.profile_create(container_profile)
except lxd_exceptions.APIError as ex:
msg = _('Failed to create profile: %s' % ex)
raise exception.NovaException(msg)
def create_container_config(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
LOG.debug('Creating container config')
''' Generate the initial config '''
container_config = {'name': instance.uuid}
self.add_value_to_config(container_config, 'profiles', ['%s' %
instance.uuid])
self.add_value_to_config(
container_config, 'hostname', instance.hostname)
''' Fetch the image from glance and configure it '''
self.container_image.fetch_image(context, instance)
self.add_value_to_config(container_config, 'source', self.get_lxd_image(instance,
image_meta))
return container_config
def _get_lxd_config(self, instance, image_meta, container_profile):
LOG.debug('get_lxd_limits')
flavor = instance.get_flavor()
mem = flavor.memory_mb * units.Mi
vcpus = flavor.vcpus
if vcpus >= 1:
self.add_value_to_config(container_profile, 'config',
{'limits.cpus': '%s' % vcpus})
if mem >= 0:
self.add_value_to_config(container_profile, 'config',
{'limits.memory': '%s' % mem})
def get_lxd_image(self, instance, image_meta):
LOG.debug('Getting LXD image')
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
img_type = img_meta_prop.get('image_type', 'default')
if img_type == 'default':
return {'type': 'image',
'alias': instance.image_ref}
def _get_network_devices(self, instance, network_info):
LOG.debug('Get network devices')
''' ugh this is ugly'''
container_network = {}
interface_count = 0
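# With hybrid plugging each VIF attaches to a Linux bridge named
# 'qbr' + the first 11 characters of the VIF id.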
for vif in network_info:
vif_id = vif['id'][:11]
mac = vif['address']
bridge = 'qbr%s' % vif_id
container_network.update({
'eth%s' % interface_count: {'nictype': 'bridged',
'hwaddr': mac,
'parent': bridge,
'type': 'nic'}})
interface_count += 1
return container_network
def add_value_to_config(self, container_config, key, value):
# Merge into an existing entry (the 'config' key can be set more
# than once); otherwise create the key.
if key in container_config:
container_config[key].update(value)
else:
container_config[key] = value

View File

@@ -0,0 +1,76 @@
import hashlib
import os
from oslo_config import cfg
from oslo_log import log as logging
from pylxd import api
from pylxd import exceptions as lxd_exceptions
from nova.i18n import _
from nova.openstack.common import fileutils
from nova import image
from nova import exception
from nova import utils
import container_config
import container_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
class LXDContainerImage(object):
def __init__(self):
self.lxd = api.API()
self.container_dir = container_utils.LXDContainerDirectories()
def fetch_image(self, context, instance):
LOG.debug("Downloading image file data %(image_ref)s to LXD",
{'image_ref': instance.image_ref})
base_dir = self.container_dir.get_base_dir()
if not os.path.exists(base_dir):
fileutils.ensure_tree(base_dir)
container_image = self.container_dir.get_container_image(instance)
if os.path.exists(container_image):
return
IMAGE_API.download(context, instance.image_ref,
dest_path=container_image)
''' Upload the image to LXD '''
with fileutils.remove_path_on_error(container_image):
try:
self.lxd.image_defined(instance.image_ref)
except lxd_exceptions.APIError as e:
if e.status_code == 404:
pass
else:
raise exception.ImageUnacceptable(
image_id=instance.image_ref,
reason=_('Unable to determine if image exists: %s' % e))
try:
LOG.debug('Uploading image: %s' % container_image)
self.lxd.image_upload(path=container_image)
except lxd_exceptions.APIError as e:
raise exception.ImageUnacceptable(
image_id=instance.image_ref,
reason=_('Image failed to upload: %s' % e))
try:
alias_config = {'name': instance.image_ref,
'target': self.get_container_image_sha256(instance)
}
LOG.debug('Creating alias: %s' % alias_config)
self.lxd.alias_create(alias_config)
except lxd_exceptions.APIError as e:
raise exception.ImageUnacceptable(image_id=instance.image_ref,
reason=_('Failed to create alias: %s' % e))
def get_container_image_sha256(self, instance):
# LXD identifies images by their SHA-256 fingerprint, so hash the
# downloaded image with sha256, not md5.
container_image = self.container_dir.get_container_image(instance)
with open(container_image, 'rb') as fd:
return hashlib.sha256(fd.read()).hexdigest()

View File

@@ -0,0 +1,179 @@
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pwd
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from nova.i18n import _, _LE, _LW
from nova import exception
from nova.openstack.common import fileutils
from nova.virt import driver
from nova.virt import hardware
from nova import utils
import container_config
import container_utils
import vif
CONF = cfg.CONF
CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver')
CONF.import_opt('vif_plugging_is_fatal', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
MAX_CONSOLE_BYTES = 100 * units.Ki
class LXDContainerOperations(object):
def __init__(self, virtapi):
self.virtapi = virtapi
self.container_config = container_config.LXDContainerConfig()
self.container_utils = container_utils.LXDContainerUtils()
self.container_dir = container_utils.LXDContainerDirectories()
self.vif_driver = vif.LXDGenericDriver()
def init_host(self, host):
return self.container_utils.init_lxd_host(host)
def list_instances(self):
return self.container_utils.list_containers()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
msg = ('Spawning container '
'network_info=%(network_info)s '
'image_meta=%(image_meta)s '
'instance=%(instance)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info,
'instance': instance,
'image_meta': image_meta,
'block_device_info': block_device_info})
LOG.debug(msg, instance=instance)
name = instance.uuid
if self.container_utils.container_defined(instance):
raise exception.InstanceExists(name=name)
self.create_instance(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def create_instance(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
LOG.debug('Creating instance')
# Ensure the directory exists and is writable
fileutils.ensure_tree(self.container_dir.get_instance_dir(instance))
# Check to see if we are using swap.
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
msg = _('Swap space is not supported by LXD.')
raise exception.NovaException(msg)
# Check to see if ephemeral block devices exist.
ephemeral_gb = instance.ephemeral_gb
if ephemeral_gb > 0:
msg = _('Ephemeral block devices are not supported.')
raise exception.NovaException(msg)
container_config = self.container_config.create_container(context,
instance, image_meta,
injected_files, admin_password,
network_info, block_device_info)
LOG.debug(container_config)
self.container_utils.container_init(container_config)
self.start_instance(instance, network_info)
def start_instance(self, instance, network_info):
LOG.debug('Starting instance')
timeout = CONF.vif_plugging_timeout
# check to see if neutron is ready before
# doing anything else
if (not self.container_utils.container_running(instance) and
utils.is_neutron() and timeout):
events = self._get_neutron_events(network_info)
else:
events = {}
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
except exception.VirtualInterfaceCreateException:
LOG.info(_LW('Failed to connect networking to instance'))
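# container_start is asynchronous; pull the operation id out of the
# returned URL and block until the operation completes.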
(state, data) = self.container_utils.container_start(instance)
self.container_utils.wait_for_container(
data.get('operation').split('/')[3])
def plug_vifs(self, instance, network_info):
for vif in network_info:
self.vif_driver.plug(instance, vif)
def unplug_vifs(self, instance, network_info):
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self.container_utils.container_destroy(instance)
self.cleanup(context, instance, network_info, block_device_info)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
return self.container_utils.container_cleanup(instance, network_info,
block_device_info)
def get_info(self, instance):
container_info = self.container_utils.container_info(instance)
return hardware.InstanceInfo(state=container_info,
max_mem_kb=0,
mem_kb=0,
num_cpu=2,
cpu_time_ns=0)
def get_console_output(self, context, instance):
LOG.debug('Getting console output')
console_log = self.container_dir.get_console_path(instance)
uid = pwd.getpwuid(os.getuid()).pw_uid
utils.execute('chown', '%s:%s' % (uid, uid),
console_log, run_as_root=True)
utils.execute('chmod', '755',
self.container_dir.get_container_dir(instance),
run_as_root=True)
with open(console_log, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
return log_data
def _get_neutron_events(self, network_info):
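# Only wait for events from VIFs that neutron has not yet activated.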
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event '
'%(event)s for instance %(uuid)s'),
{'event': event_name, 'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()

View File

@@ -1,6 +1,3 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
@@ -18,46 +15,181 @@
# under the License.
import os
import shutil
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _LE
from nova.virt import images
from pylxd import api
from pylxd import exceptions as lxd_exceptions
from nova.i18n import _
from nova import exception
from nova.compute import power_state
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def get_base_dir():
return os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
LXD_POWER_STATES = {
'RUNNING': power_state.RUNNING,
'STOPPED': power_state.SHUTDOWN,
'STARTING': power_state.NOSTATE,
'STOPPING': power_state.SHUTDOWN,
'ABORTING': power_state.CRASHED,
'FREEZING': power_state.PAUSED,
'FROZEN': power_state.SUSPENDED,
'THAWED': power_state.PAUSED,
'PENDING': power_state.NOSTATE,
'Success': power_state.NOSTATE,
'UNKNOWN': power_state.NOSTATE
}
def get_container_image(instance):
base_dir = get_base_dir()
return os.path.join(base_dir,
'%s.tar.gz' % instance.image_ref)
class LXDContainerDirectories(object):
def __init__(self):
self.base_dir = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
def get_base_dir(self):
return self.base_dir
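# Paths under CONF.instances_path are managed by nova; the remaining
# helpers return paths inside LXD's own tree (CONF.lxd.lxd_root_dir).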
def get_instance_dir(self, instance):
return os.path.join(CONF.instances_path,
instance.uuid)
def get_container_image(self, instance):
return os.path.join(self.base_dir,
'%s.tar.gz' % instance.image_ref)
def get_container_configdrive(self, instance):
return os.path.join(CONF.instances_path,
instance.uuid,
'config-drive')
def get_console_path(self, instance):
return os.path.join(CONF.lxd.lxd_root_dir,
'lxc',
instance.uuid,
'console.log')
def get_container_dir(self, instance):
return os.path.join(CONF.lxd.lxd_root_dir,
'lxc',
instance.uuid)
def get_container_rootfs(self, instance):
return os.path.join(CONF.lxd.lxd_root_dir,
'lxc',
instance.uuid,
'rootfs')
def fetch_image(context, image, instance, max_size=0):
try:
images.fetch(context, instance.image_ref, image,
instance.user_id, instance.project_id,
max_size=max_size)
except Exception:
LOG.exception(_LE("Image %(image_id)s doesn't exist anymore on"),
{'image_id': instance.image_ref})
class LXDContainerUtils(object):
def get_console_path(instance):
return os.path.join(CONF.lxd.lxd_root_dir,
'lxc',
instance.uuid,
'console.log')
def __init__(self):
self.lxd = api.API()
self.container_dir = LXDContainerDirectories()
def get_container_dir(instance):
return os.path.join(CONF.lxd.lxd_root_dir,
'lxc',
instance.uuid)
def init_lxd_host(self, host):
LOG.debug('Host check')
try:
return self.lxd.host_ping()
except lxd_exceptions.APIError as ex:
msg = _('Unable to connect to LXD daemon: %s' % ex)
raise exception.HostNotFound(msg)
def list_containers(self):
try:
return self.lxd.container_list()
except lxd_exceptions.APIError as ex:
msg = _('Unable to list instances: %s' % ex)
raise exception.NovaException(msg)
def container_defined(self, instance):
LOG.debug('Container defined')
try:
self.lxd.container_defined(instance.uuid)
except lxd_exceptions.APIError as ex:
if ex.status_code == 404:
return False
raise
return True
def container_running(self, instance):
LOG.debug('container running')
return self.lxd.container_running(instance.uuid)
def container_start(self, instance):
LOG.debug('container start')
try:
return self.lxd.container_start(instance.uuid,
CONF.lxd.lxd_timeout)
except lxd_exceptions.APIError as ex:
msg = _('Failed to start container: %s' % ex)
raise exception.NovaException(msg)
def container_destroy(self, instance):
LOG.debug('Container destroy')
try:
return self.lxd.container_destroy(instance.uuid)
except lxd_exceptions.APIError as ex:
if ex.status_code == 404:
return
else:
msg = _('Failed to destroy container: %s' % ex)
raise exception.NovaException(msg)
def container_cleanup(self, instance, network_info, block_device_info):
LOG.debug('Container cleanup')
container_dir = self.container_dir.get_instance_dir(instance)
if os.path.exists(container_dir):
shutil.rmtree(container_dir)
self.profile_delete(instance)
def container_info(self, instance):
LOG.debug('container info')
try:
container_state = self.lxd.container_state(instance.uuid)
state = LXD_POWER_STATES[container_state]
except lxd_exceptions.APIError:
state = power_state.NOSTATE
return state
def container_init(self, container_config):
LOG.debug('container init')
try:
self.lxd.container_init(container_config)
except lxd_exceptions.APIError as ex:
msg = _('Failed to initialize container: %s' % ex)
raise exception.NovaException(msg)
def profile_delete(self, instance):
LOG.debug('profile delete')
try:
self.lxd.profile_delete(instance.uuid)
except lxd_exceptions.APIError as ex:
msg = _('Failed to delete profile: %s' % ex)
raise exception.NovaException(msg)
def wait_for_container(self, oid):
if not oid:
msg = _('Unable to determine container operation')
raise exception.NovaException(msg)
if not self.lxd.wait_container_operation(oid, 200, 20):
msg = _('Container creation timed out')
raise exception.NovaException(msg)

View File

@@ -1,6 +1,3 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
@@ -17,35 +14,24 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
LXD Driver
"""
import socket
from oslo_config import cfg
from oslo_log import log as logging
from pylxd import api
from nova import exception
from nova.i18n import _, _LE
from nova.i18n import _
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt import hardware
import container
import container_ops
import host
import migration
lxd_opts = [
cfg.StrOpt('lxd_root_dir',
default='/var/lib/lxd/',
help='Default LXD directory'),
cfg.StrOpt('lxd_image_type',
default='nclxd.nova.virt.lxd.image.DefaultContainerImage',
help='Default image')
cfg.IntOpt('lxd_timeout',
default=5,
help='Default LXD timeout')
]
CONF = cfg.CONF
@@ -54,6 +40,8 @@ LOG = logging.getLogger(__name__)
class LXDDriver(driver.ComputeDriver):
""" LXD Lightervisor
"""
capabilities = {
"has_imagecache": False,
@@ -64,31 +52,14 @@ class LXDDriver(driver.ComputeDriver):
def __init__(self, virtapi):
self.virtapi = virtapi
self.firewall_driver = firewall.load_driver(
default='nova.virt.firewall.NoopFirewallDriver')
self.lxd = api.API()
self.container = container.Container(self.lxd,
self.virtapi,
self.firewall_driver)
self.migration = migration.Migration()
self.host = host.Host(self.lxd)
self.container_ops = container_ops.LXDContainerOperations(virtapi)
self.host = host.LXDHost()
def init_host(self, host):
try:
self.lxd.host_ping()
except Exception as ex:
LOG.exception(_LE('Unable to connect to LXD daemon: %s') % ex)
raise
return self.container_ops.init_host(host)
def get_info(self, instance):
istate = self.container.container_state(instance)
return hardware.InstanceInfo(state=istate,
max_mem_kb=0,
mem_kb=0,
num_cpu=1,
cpu_time_ns=0)
return self.container_ops.get_info(instance)
def instance_exists(self, instance):
try:
@@ -96,80 +67,47 @@ class LXDDriver(driver.ComputeDriver):
except NotImplementedError:
return instance.name in self.list_instances()
def estimate_instance_overhead(self, instance_info):
return {'memory_mb': 0}
def list_instances(self):
return self.lxd.container_list()
return self.container_ops.list_instances()
def list_instance_uuids(self):
return self.lxd.container_list()
def plug_vifs(self, instance, network_info):
for vif in network_info:
self.container.plug_vifs(instance, network_info)
def unplug_vifs(self, instance, network_info, ignore_errors):
try:
for vif in network_info:
self.container.unplug_vifs(instance, network_info)
except Exception:
if not ignore_errors:
raise
return self.container_ops.list_instances()
def rebuild(self, context, instance, image_meta, injected_files,
admin_password, bdms, detach_block_devices,
attach_block_devices, network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
return self.container.container_rebuild(context, instance, image_meta,
injected_files, admin_password, bdms, detach_block_devices,
attach_block_devices, network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False)
raise NotImplementedError()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
return self.container.container_start(context, instance, image_meta,
injected_files, admin_password,
network_info, block_device_info)
return self.container_ops.spawn(context, instance, image_meta,
injected_files, admin_password, network_info,
block_device_info)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
return self.container.container_destroy(context, instance,
network_info,
block_device_info,
destroy_disks,
migrate_data)
return self.container_ops.destroy(context, instance, network_info,
block_device_info, destroy_disks,
migrate_data)
self.cleanup(context, instance, network_info, block_device_info)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
return self.container.container_cleanup(context, instance,
network_info, block_device_info,
destroy_disks, migrate_data,
destroy_vifs)
return self.container_ops.cleanup(context, instance, network_info,
block_device_info, destroy_disks,
migrate_data, destroy_vifs)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
return self.container.container_reboot(context, instance,
network_info,
reboot_type, block_device_info,
bad_volumes_callback)
def get_console_pool_info(self, console_type):
raise NotImplementedError()
def get_console_output(self, context, instance):
return self.container.get_console_output(context, instance)
def get_vnc_console(self, context, instance):
raise NotImplementedError()
def get_spice_console(self, context, instance):
raise NotImplementedError()
def get_rdp_console(self, context, instance):
raise NotImplementedError()
def get_serial_console(self, context, instance):
raise NotImplementedError()
return self.container_ops.get_console_output(context, instance)
def get_diagnostics(self, instance):
raise NotImplementedError()
@@ -188,22 +126,21 @@ class LXDDriver(driver.ComputeDriver):
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
return self.volume.container_attach(context, connection_info,
instance, mountpoint,
disk_bus, device_type,
encryption)
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
return self.volume.container_detach_volume(connection_info, instance,
mountpoint, encryption)
raise NotImplementedError()
def swap_volume(self, old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
raise NotImplementedError()
def attach_interface(self, instance, image_meta, vif):
return self.container.container_attach_interface(instance, image_meta,
vif)
raise NotImplementedError()
def detach_interface(self, instance, vif):
return self.container.container_detach_interface(instance, vif)
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
@@ -212,8 +149,7 @@ class LXDDriver(driver.ComputeDriver):
raise NotImplementedError()
def snapshot(self, context, instance, image_id, update_task_state):
return self.container.snapshot(context, instance, image_id,
update_task_state)
raise NotImplementedError()
def post_interrupted_snapshot_cleanup(self, context, instance):
pass
@@ -231,43 +167,40 @@ class LXDDriver(driver.ComputeDriver):
raise NotImplementedError()
def pause(self, instance):
return self.container.container_pause(instance)
raise NotImplementedError()
def unpause(self, instance):
return self.container.container_unpause(instance)
raise NotImplementedError()
def suspend(self, context, instance):
return self.container.container_suspend(context, instance)
raise NotImplementedError()
def resume(self, context, instance, network_info, block_device_info=None):
return self.container.container_resume(context, instance,
network_info,
block_device_info)
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
return self.container.container_rescue(context, instance,
network_info, image_meta,
rescue_password)
raise NotImplementedError()
def unrescue(self, instance, network_info):
return self.container.container_unrescue(instance, network_info)
raise NotImplementedError()
def power_off(self, instance, timeout=0, retry_interval=0):
return self.container.container_power_off(instance, timeout,
retry_interval)
raise NotImplementedError()
def power_on(self, context, instance, network_info,
block_device_info=None):
return self.container.container_power_on(context, instance,
network_info,
block_device_info)
raise NotImplementedError()
def soft_delete(self, instance):
return self.container.container_soft_delete(instance)
raise NotImplementedError()
def restore(self, instance):
return self.container.container_restore(instance)
raise NotImplementedError()
def get_available_resource(self, nodename):
return self.host.get_available_resource(nodename)
@@ -322,32 +255,49 @@ class LXDDriver(driver.ComputeDriver):
raise NotImplementedError()
def check_can_live_migrate_source(self, context, instance,
dest_check_data,
block_device_info=None):
dest_check_data, block_device_info=None):
raise NotImplementedError()
def get_instance_disk_info(self, instance,
block_device_info=None):
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
raise NotImplementedError()
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
raise NotImplementedError()
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
raise NotImplementedError()
def reset_network(self, instance):
pass
def ensure_filtering_rules_for_instance(self, instance, network_info):
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
raise NotImplementedError()
def filter_defer_apply_on(self):
pass
def filter_defer_apply_off(self):
pass
def unfilter_instance(self, instance, network_info):
self.firewall_driver.unfilter_instance(instance, network_info)
raise NotImplementedError()
def set_admin_password(self, instance, new_pass):
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
raise NotImplementedError()
def change_instance_metadata(self, context, instance, diff):
pass
def inject_network_info(self, instance, nw_info):
pass
@@ -367,17 +317,31 @@ class LXDDriver(driver.ComputeDriver):
return self.host.get_host_uptime()
def get_host_cpu_stats(self):
return self.host.get_host_cpu_stats()
raise NotImplementedError()
def block_stats(self, instance, disk_id):
return [0, 0, 0, 0, None] # zulcss - fixme
raise NotImplementedError()
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?"""
return False
def macs_for_instance(self, instance):
return None
def manage_image_cache(self, context, all_instances):
pass
def add_to_aggregate(self, context, aggregate, host, **kwargs):
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
raise NotImplementedError()
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
raise NotImplementedError()
def get_volume_connector(self, instance):
raise NotImplementedError()
@@ -397,32 +361,6 @@ class LXDDriver(driver.ComputeDriver):
def instance_on_disk(self, instance):
return False
def register_event_listener(self, callback):
self._compute_event_callback = callback
def emit_event(self, event):
if not self._compute_event_callback:
LOG.debug("Discarding event %s", str(event))
return
if not isinstance(event, virtevent.Event):
raise ValueError(
_("Event must be an instance of nova.virt.event.Event"))
try:
LOG.debug("Emitting event %s", str(event))
self._compute_event_callback(event)
except Exception as ex:
LOG.error(_LE("Exception dispatching event %(event)s: %(ex)s"),
{'event': event, 'ex': ex})
def delete_instance_files(self, instance):
return True
@property
def need_legacy_block_device_info(self):
return True
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
raise NotImplementedError()
@@ -431,9 +369,15 @@ class LXDDriver(driver.ComputeDriver):
snapshot_id, delete_info):
raise NotImplementedError()
def default_root_device_name(self, instance, image_meta, root_bdm):
raise NotImplementedError()
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
raise NotImplementedError()
def quiesce(self, context, instance, image_meta):
return self.container.container_quiesce(context, instance, image_meta)
raise NotImplementedError()
def unquiesce(self, context, instance, image_meta):
return self.container.container_unquiesce(context, instance,
image_meta)
raise NotImplementedError()

View File

@@ -30,27 +30,30 @@ from nova.compute import arch
from nova.compute import hv_type
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova.i18n import _LW
from nova.i18n import _LW, _
from nova import exception
from nova import utils
from cpuinfo import cpuinfo
from pylxd import api
import psutil
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Host(object):
def __init__(self, lxd):
self.lxd = lxd
self.host_cpu_info = cpuinfo.get_cpu_info()
class LXDHost(object):
def __init__(self):
self.lxd = api.API()
def get_available_resource(self, nodename):
local_cpu_info = self._get_cpu_info()
LOG.debug('In get_available_resource')
local_cpu_info = self._get_cpuinfo()
cpu_topology = local_cpu_info['topology']
vcpus = (cpu_topology['cores'] *
cpu_topology['sockets'] *
cpu_topology['threads'])
vcpus = (int(cpu_topology['cores']) *
int(cpu_topology['sockets']) *
int(cpu_topology['threads']))
local_memory_info = self._get_memory_mb_usage()
local_disk_info = self._get_fs_info(CONF.lxd.lxd_root_dir)
@@ -63,20 +66,22 @@ class Host(object):
'local_gb_used': local_disk_info['used'] / units.Gi,
'vcpus_used': 0,
'hypervisor_type': 'lxd',
'hypervisor_version': 1,
'hypervisor_version': '011',
'cpu_info': jsonutils.dumps(local_cpu_info),
'hypervisor_hostname': platform.node(),
'supported_instances': jsonutils.dumps(
[(arch.I686, hv_type.LXC, vm_mode.EXE),
[(arch.I686, hv_type.LXC, vm_mode.EXE),
(arch.X86_64, hv_type.LXC, vm_mode.EXE)]),
'numa_topology': None,
}
return data
def get_host_ip_addr(self):
ips = compute_utils.get_machine_ips()
if CONF.my_ip not in ips:
LOG.warn(_LW('my_ip address (%(my_ip)s) was not found on '
'any of the interfaces: %(ifaces)s'),
'any of the interfaces: %(ifaces)s'),
{'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
return CONF.my_ip
@@ -120,33 +125,53 @@ class Host(object):
'used': (total - avail) * 1024
}
def _get_cpu_info(self):
def _get_cpuinfo(self):
cpuinfo = self._get_cpu_info()
cpu_info = dict()
cpu_info['arch'] = platform.uname()[5]
cpu_info['model'] = self.host_cpu_info['brand']
cpu_info['vendor'] = self.host_cpu_info['vendor_id']
cpu_info['model'] = cpuinfo.get('model name', 'unknown')
cpu_info['vendor'] = cpuinfo.get('vendor id', 'unknown')
topology = dict()
topology['sockets'] = self._get_cpu_sockets()
topology['cores'] = self._get_cpu_cores()
topology['threads'] = 1 # fixme
topology['sockets'] = cpuinfo.get('socket(s)', 1)
topology['cores'] = cpuinfo.get('core(s) per socket', 1)
topology['threads'] = cpuinfo.get('thread(s) per core', 1)
cpu_info['topology'] = topology
cpu_info['features'] = self.host_cpu_info['flags']
cpu_info['features'] = cpuinfo.get('flags', 'unknown')
return cpu_info
def _get_cpu_cores(self):
try:
return psutil.cpu_count()
except Exception:
return psutil.NUM_CPUS
def _get_cpu_info(self):
''' Parse the output of lscpu. '''
cpuinfo = {}
out, err = utils.execute('lscpu')
if err:
msg = _('Unable to parse lscpu output.')
raise exception.NovaException(msg)
def _get_cpu_sockets(self):
try:
return psutil.cpu_count(logical=False)
except Exception:
return psutil.NUM_CPUS
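# lscpu prints one 'Name: value' pair per line; normalize the names
# to lowercase keys.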
cpu = [line.strip('\n') for line in out.splitlines()]
for line in cpu:
if line.strip():
name, value = line.split(':', 1)
name = name.strip().lower()
cpuinfo[name] = value.strip()
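# lscpu does not report CPU feature flags, so read them from /proc/cpuinfo.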
with open('/proc/cpuinfo', 'r') as f:
features = [line.strip('\n') for line in f.readlines()]
for line in features:
if line.strip():
if line.startswith('flags'):
name, value = line.split(':', 1)
name = name.strip().lower()
cpuinfo[name] = value.strip()
return cpuinfo
def _get_hypervisor_version(self):
version = self.lxd.get_lxd_version()
return '.'.join(str(v) for v in version)
def get_host_cpu_stats(self):
return {

View File

@@ -1,56 +0,0 @@
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def get_fs_info(path):
"""get free/used/total space info for a filesystem
:param path: Any dirent on the filesystem
:returns: A dict containing
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
:total: How big the filesystem is (in bytes)
"""
hddinfo = os.statvfs(path)
total = hddinfo.f_blocks * hddinfo.f_bsize
available = hddinfo.f_bavail * hddinfo.f_bsize
used = total - available
return {'total': total,
'available': available,
'used': used}
def get_memory_mb_usage():
"""Get the used memory size(MB) of the host.
"returns: the total usage of memory(MB)
"""
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemTotal:')
idx2 = m.index('MemFree:')
idx3 = m.index('Buffers:')
idx4 = m.index('Cached:')
total = int(m[idx1 + 1])
avail = int(m[idx2 + 1]) + int(m[idx3 + 1]) + int(m[idx4 + 1])
return {
'total': total * 1024,
'used': (total - avail) * 1024
}

View File

@@ -1,142 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from nova.i18n import _, _LE
from nova import exception
from nova.openstack.common import fileutils
from nova import utils
import container_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def load_driver(default, *args, **kwargs):
image_class = importutils.import_class(CONF.lxd.lxd_image_type)
return image_class(*args, **kwargs)
def fetch_image(client, context, image, instance):
try:
if image not in client.image_list():
if not os.path.exists(container_utils.get_base_dir()):
fileutils.ensure_tree(container_utils.get_base_dir())
container_image = container_utils.get_container_image(
instance)
container_utils.fetch_image(context, container_image, instance)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error downloading image: %(instance)s'
' %(image)s'),
{'instance': instance.uuid,
'image': instance.image_ref})
class BaseContainerImage(object):
def __init__(self, lxd):
self.lxd = lxd
def setup_container(self, context, instance, image_meta):
pass
def destroy_container(self, instance, image_meta):
pass
class DefaultContainerImage(object):
def __init__(self, lxd):
self.lxd = lxd
def setup_container(self, context, instance, image_meta):
LOG.debug("Setting up Container")
container_image = container_utils.get_container_image(instance)
try:
if instance.image_ref in self.lxd.image_list():
return
if os.path.exists(container_image):
return
fetch_image(self.lxd, context,
instance.image_ref, instance)
self._upload_image(container_image, instance, image_meta)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to set up container %(instance)s: %(ex)s'),
{'instance': instance.uuid, 'ex': ex})
self.destroy_container(instance, image_meta)
def _upload_image(self, container_image, instance, image_meta):
if not self._check_image_file(container_image, image_meta):
msg = _('md5 checksum mismatch')
raise exception.NovaException(msg)
if not self.lxd.image_upload(container_image,
container_image.split('/')[-1]):
msg = _('Image upload failed')
raise exception.NovaException(msg)
config = {'target': self._get_lxd_fingerprint(container_image),
'name': instance.image_ref}
if not self.lxd.alias_create(config):
msg = _('Alias creation failed')
raise exception.NovaException(msg)
def _check_image_file(self, container_image, image_meta):
md5sum = self._get_glance_md5sum(container_image)
if image_meta.get('checksum') == md5sum:
return True
else:
return False
def _get_glance_md5sum(self, container_image):
out, err = utils.execute('md5sum', container_image)
return out.split(' ')[0]
def _get_lxd_fingerprint(self, container_image):
# LXD image fingerprints are SHA-256 digests, not md5.
with open(container_image, 'rb') as fd:
return hashlib.sha256(fd.read()).hexdigest()
def _image_rollback(self, container_image):
if os.path.exists(container_image):
os.unlink(container_image)
def destroy_container(self, instance, image_meta):
LOG.debug('Destroying container')
container_image = container_utils.get_container_image(instance)
if instance.image_ref in self.lxd.alias_list():
self.lxd.alias_delete(instance.image_ref)
fingerprint = self._get_lxd_fingerprint(container_image)
if fingerprint in self.lxd.image_list():
self.lxd.image_delete(fingerprint)
if os.path.exists(container_image):
os.unlink(container_image)

View File

@@ -1,92 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.i18n import _
class Migration(object):
def __init__(self):
pass
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
raise NotImplementedError()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
raise NotImplementedError()
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
raise NotImplementedError()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
raise NotImplementedError()
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
raise NotImplementedError()
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
pass
def post_live_migration_at_source(self, context, instance, network_info):
raise NotImplementedError(_("Hypervisor driver does not support "
"post_live_migration_at_source method"))
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
raise NotImplementedError()
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
raise NotImplementedError()
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
raise NotImplementedError()
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
raise NotImplementedError()

View File

@@ -1,85 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from nova import exception
from nova.i18n import _
import container_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class LXDProfile(object):
def __init__(self, lxd):
self.lxd = lxd
''' Prefetch information that we need about the host.'''
self.host = self.lxd.host_info()
def profile_create(self, instance, network_info):
LOG.debug('Creating host profile')
profile = {'name': instance.uuid,
'config': {'raw.lxc':
'lxc.console.logfile = %s\n'
% container_utils.get_console_path(instance)}
}
if network_info:
profile['devices'] = self._get_network_devices(network_info)
if instance:
profile = self._get_container_limits(instance, profile)
if not self.lxd.profile_create(profile):
msg = _('Failed to create profile')
raise exception.NovaException(msg)
def profile_delete(self, instance):
if not self.lxd.profile_delete(instance.uuid):
msg = _('Unable to delete profile')
raise exception.NovaException(msg)
def _get_container_limits(self, instance, profile):
LOG.debug("Setting container limits")
if instance.vcpus >= 1:
profile['config'].update({'limits.cpus': '%s'
% instance.vcpus})
if instance.memory_mb >= 0:
profile['config'].update({'limits.memory': instance.memory_mb})
return profile
def _get_network_devices(self, network_info):
for vif in network_info:
vif_id = vif['id'][:11]
vif_type = vif['type']
bridge = vif['network']['bridge']
mac = vif['address']
if vif_type == 'ovs':
bridge = 'qbr%s' % vif_id
return {'eth0': {'nictype': 'bridged',
'hwaddr': mac,
'parent': bridge,
'type': 'nic'}}

View File

@@ -1,76 +0,0 @@
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pwd
import grp
class LXCIdMap(object):
def __init__(self, ustart, unum, gstart, gnum):
self.ustart = int(ustart)
self.unum = int(unum)
self.gstart = int(gstart)
self.gnum = int(gnum)
def usernsexec_margs(self, with_read=None):
if with_read:
if with_read == "user":
with_read = os.getuid()
unum = self.unum - 1
            rflag = ['-m', 'u:%s:%s:1' % (self.ustart + self.unum, with_read)]
else:
unum = self.unum
rflag = []
return ['-m', 'u:0:%s:%s' % (self.ustart, unum),
'-m', 'g:0:%s:%s' % (self.gstart, self.gnum)] + rflag
def lxc_conf_lines(self):
return (('lxc.id_map', 'u 0 %s %s' % (self.ustart, self.unum)),
('lxc.id_map', 'g 0 %s %s' % (self.gstart, self.gnum)))
def get_user(self):
return (self.ustart, self.gstart)
class LXCUserIdMap(LXCIdMap):
def __init__(self, user=None, group=None, subuid_f="/etc/subuid",
subgid_f="/etc/subgid"):
if user is None:
user = pwd.getpwuid(os.getuid())[0]
if group is None:
group = grp.getgrgid(os.getgid()).gr_name
def parse_sfile(fname, name):
line = None
with open(fname, "r") as fp:
for cline in fp:
if cline.startswith(name + ":"):
line = cline
break
if line is None:
raise ValueError("%s not found in %s" % (name, fname))
toks = line.split(":")
return (toks[1], toks[2])
ustart, unum = parse_sfile(subuid_f, user)
gstart, gnum = parse_sfile(subgid_f, group)
self.user = user
self.group = group
super(LXCUserIdMap, self).__init__(ustart, unum, gstart, gnum)
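# A hedged usage sketch, assuming /etc/subuid and /etc/subgid both contain
# the line 'ubuntu:100000:65536':
#
#   idmap = LXCUserIdMap(user='ubuntu', group='ubuntu')
#   idmap.lxc_conf_lines()
#   # -> (('lxc.id_map', 'u 0 100000 65536'),
#   #     ('lxc.id_map', 'g 0 100000 65536'))
#   idmap.usernsexec_margs()
#   # -> ['-m', 'u:0:100000:65536', '-m', 'g:0:100000:65536']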

View File

@ -1,32 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Volume(object):
    def __init__(self):
pass
def container_attach_volume(self, context, connection_info, instance,
mountpoint, disk_bus=None, device_type=None,
encryption=None):
raise NotImplementedError()
def container_detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
raise NotImplementedError()

View File

@ -0,0 +1,24 @@
import contextlib
import platform
import mock
from oslo_config import cfg
from nova import context
from nova import test
from nova.virt import fake
from nova.tests.unit import fake_network
from nova.tests.unit import fake_instance
from nclxd.nova.virt.lxd import driver
from nova import exception
from nova import utils
from nclxd.nova.virt.lxd import container_config
CONF = cfg.CONF
class LXDTestContainerConfig(test.NoDBTestCase):
def setUp(self):
super(LXDTestContainerConfig, self).setUp()
self.container_config = container_config.LXDContainerConfig()

View File

@ -0,0 +1,23 @@
import contextlib
import platform
import mock
from oslo_config import cfg
from nova import context
from nova import test
from nova.virt import fake
from nova.tests.unit import fake_network
from nova.tests.unit import fake_instance
from nclxd.nova.virt.lxd import driver
from nova import exception
from nova import utils
from nclxd.nova.virt.lxd import container_ops
CONF = cfg.CONF
class LXDTestContainerOps(test.NoDBTestCase):
def setUp(self):
super(LXDTestContainerOps, self).setUp()

View File

@ -0,0 +1,111 @@
import os
import mock
import shutil
from oslo_config import cfg
import pylxd
from nova import test
from nova.tests.unit import fake_instance
from nclxd.nova.virt.lxd import container_utils
CONF = cfg.CONF
class LXDTestContainerDirectory(test.NoDBTestCase):
def setUp(self):
super(LXDTestContainerDirectory, self).setUp()
self.container_dir = container_utils.LXDContainerDirectories()
def test_get_base_dir(self):
path = self.container_dir.get_base_dir()
expected_path = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
self.assertEqual(expected_path, path)
def test_get_container_image(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
path = self.container_dir.get_container_image(instance)
expected_path = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'%s.tar.gz' % instance.image_ref)
self.assertEqual(expected_path, path)
def test_get_container_configdrive(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
path = self.container_dir.get_container_configdirve(instance)
expected_path = os.path.join(CONF.instances_path,
instance.uuid,
'config-drive')
self.assertEqual(expected_path, path)
def test_get_console_path(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
path = self.container_dir.get_console_path(instance)
expected_path = os.path.join(CONF.lxd.lxd_root_dir,
'lxc',
instance.uuid,
'console.log')
self.assertEqual(expected_path, path)
def test_get_container_dir(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
path = self.container_dir.get_container_dir(instance)
expected_path = os.path.join(CONF.lxd.lxd_root_dir,
'lxc',
instance.uuid)
self.assertEqual(expected_path, path)
def test_get_container_rootfs(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
path = self.container_dir.get_container_rootfs(instance)
expected_path = os.path.join(CONF.lxd.lxd_root_dir,
'lxc',
instance.uuid,
'rootfs')
self.assertEqual(expected_path, path)
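# The assertions above pin down the directory layout; a minimal sketch of
# the helper class they exercise (an inference from the expected paths,
# not the actual implementation) would be:
#
#   class LXDContainerDirectories(object):
#       def get_base_dir(self):
#           return os.path.join(CONF.instances_path,
#                               CONF.image_cache_subdirectory_name)
#
#       def get_container_image(self, instance):
#           return os.path.join(self.get_base_dir(),
#                               '%s.tar.gz' % instance.image_ref)
#
#       def get_console_path(self, instance):
#           return os.path.join(CONF.lxd.lxd_root_dir, 'lxc',
#                               instance.uuid, 'console.log')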
class LXDTestContainerUtils(test.NoDBTestCase):
def setUp(self):
super(LXDTestContainerUtils, self).setUp()
self.container_utils = container_utils.LXDContainerUtils()
@mock.patch.object(pylxd.api.API, 'host_ping')
def test_init_lxd_host(self, mock_ping):
mock_ping.return_value = True
self.assertTrue(self.container_utils.init_lxd_host("fakehost"))
@mock.patch.object(pylxd.api.API, 'container_list')
def test_container_list(self, mock_container_list):
mock_container_list.return_value = ['instance-0001',
'instance-0002']
self.assertEqual(len(self.container_utils.list_containers()), 2)
@mock.patch.object(pylxd.api.API, 'container_start')
def test_container_start(self, mock_container_start):
mock_container_start.return_value = True
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
self.assertTrue(self.container_utils.container_start(instance))
@mock.patch.object(pylxd.api.API, 'container_destroy')
def test_container_destroy(self, mock_container_destroy):
mock_container_destroy.return_value = True
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
self.assertTrue(self.container_utils.container_destroy(instance))
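# The mock targets above suggest LXDContainerUtils is a thin wrapper over
# pylxd.api.API; a hedged sketch of that shape (the argument details are
# assumptions, not the actual implementation):
#
#   class LXDContainerUtils(object):
#       def __init__(self):
#           self.lxd = pylxd.api.API()
#
#       def init_lxd_host(self, host):
#           return self.lxd.host_ping()
#
#       def container_start(self, instance):
#           return self.lxd.container_start(instance.uuid)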

View File

@ -0,0 +1,32 @@
import contextlib
import platform
import mock
from oslo_config import cfg
from nova import context
from nova import test
from nova.virt import fake
from nova.tests.unit import fake_network
from nova.tests.unit import fake_instance
from nclxd.nova.virt.lxd import driver
from nova import exception
from nova import utils
from nclxd.nova.virt.lxd import container_ops
CONF = cfg.CONF
class LXDTestDriver(test.NoDBTestCase):
def setUp(self):
super(LXDTestDriver, self).setUp()
self.connection = driver.LXDDriver(fake.FakeVirtAPI())
def test_capabilities(self):
self.assertFalse(self.connection.capabilities['has_imagecache'])
self.assertFalse(self.connection.capabilities['supports_recreate'])
self.assertFalse(self.connection.capabilities[
'supports_migrate_to_same_host'])
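    # Taken together, these assertions imply a capabilities dict of roughly
    # this shape on LXDDriver (inferred from the test, not copied from the
    # driver):
    #
    #   capabilities = {'has_imagecache': False,
    #                   'supports_recreate': False,
    #                   'supports_migrate_to_same_host': False}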

View File

@ -1,104 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import platform
import mock
from oslo_config import cfg
from oslo_utils import units
from nova import test
from nova.virt import fake
from nclxd.nova.virt.lxd import driver
from nclxd.nova.virt.lxd import host
from nova import utils
CONF = cfg.CONF
class LXDTestHostCase(test.NoDBTestCase):
def setUp(self):
super(LXDTestHostCase, self).setUp()
self.connection = driver.LXDDriver(fake.FakeVirtAPI())
def test_get_available_resource(self):
memory = {
'total': 4 * units.Mi,
'used': 1 * units.Mi
}
disk = {
'total': 10 * units.Gi,
'available': 3 * units.Gi,
'used': 1 * units.Gi
}
cpu_info = {
'arch': 'x86_64',
'model': 'Intel(R) Pentium(R) CPU J2900 @ 2.41GHz',
'vendor': 'GenuineIntel',
'sockets': 1,
'cores': 4,
'threads': 1,
'topology': {'sockets': 1,
'cores': 4,
'threads': 1
},
            'features': 'fpu vme de pse tsc msr pae mce cx8 apic sep mtrr '
                        'pge mca cmov pat pse36 clflush dts acpi mmx fxsr '
                        'sse sse2 ss ht tm pbe syscall nx rdtscp lm '
                        'constant_tsc arch_perfmon pebs bts rep_good nopl '
                        'xtopology nonstop_tsc aperfmperf pni pclmulqdq '
                        'dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr '
                        'pdcm sse4_1 sse4_2 movbe popcnt tsc_deadline_timer '
                        'rdrand lahf_lm 3dnowprefetch ida arat epb dtherm '
                        'tpr_shadow vnmi flexpriority ept vpid tsc_adjust '
                        'smep erms'
        }
with contextlib.nested(
mock.patch.object(host.Host, '_get_fs_info',
return_value=disk),
mock.patch.object(host.Host, '_get_memory_mb_usage',
return_value=memory),
mock.patch.object(host.Host, '_get_cpu_info',
return_value=cpu_info)
) as (
_get_fs_info,
_get_memory_mb_usage,
_get_cpu_info
):
stats = self.connection.get_available_resource("compute1")
            self.assertEqual(stats['vcpus'], 4)
            self.assertEqual(stats['memory_mb'], 4)
            self.assertEqual(stats['memory_mb_used'], 1)
            self.assertEqual(stats['local_gb'], 10)
            self.assertEqual(stats['local_gb_used'], 1)
            self.assertEqual(stats['vcpus_used'], 0)
            self.assertEqual(stats['hypervisor_type'], 'lxd')
            self.assertEqual(stats['hypervisor_version'], 1)
            self.assertEqual(stats['hypervisor_hostname'], platform.node())
def test_get_host_ip_addr(self):
ip = self.connection.get_host_ip_addr()
self.assertEqual(ip, CONF.my_ip)
#@mock.patch('nova.utils.execute')
#def test_get_host_uptime(self, mock_execute):
# self.connection.get_host_uptime()
# mock_execute.assert_has_calls([
# mock.call('env', 'LANG=C', 'uptime')])

View File

@ -1,71 +0,0 @@
# Copyright 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
from oslo_config import cfg
from nova import test
from nova.tests.unit import fake_instance
from nclxd.nova.virt.lxd import container_utils
CONF = cfg.CONF
class LXDUtilsTestCase(test.NoDBTestCase):
def test_get_base_dir(self):
path = container_utils.get_base_dir()
expected_path = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
self.assertEqual(expected_path, path)
def test_get_container_image(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
path = container_utils.get_container_image(instance)
expected_path = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'%s.tar.gz' % instance.image_ref)
self.assertEqual(expected_path, path)
def test_get_console_path(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
path = container_utils.get_console_path(instance)
expected_path = os.path.join(CONF.lxd.lxd_root_dir,
'lxc',
instance.uuid,
'console.log')
self.assertEqual(expected_path, path)
def test_get_container_dir(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
path = container_utils.get_container_dir(instance)
expected_path = os.path.join(CONF.lxd.lxd_root_dir,
'lxc',
instance.uuid)
self.assertEqual(expected_path, path)
@mock.patch('nova.virt.images.fetch')
def test_fetch_image(self, mock_images):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
context = 'opaque context'
target = '/tmp/targetfile'
container_utils.fetch_image(context, target, instance)
mock_images.assert_called_once_with(context, None, target,
instance.user_id, instance.project_id,
max_size=0)
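# The assertion fixes the exact call into nova.virt.images; a minimal
# sketch of fetch_image consistent with it (the None image ref and the
# max_size=0 default are read straight from the mock call above):
#
#   def fetch_image(context, target, instance, max_size=0):
#       images.fetch(context, None, target,
#                    instance.user_id, instance.project_id,
#                    max_size=max_size)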

View File

@ -1,48 +0,0 @@
import contextlib
import mock
from oslo_config import cfg
from nova import test
from nova.network import linux_net
from nova.network import model as network_model
from nclxd.nova.virt.lxd import driver as lxd_driver
from nova import exception
from nova import utils
CONF = cfg.CONF
class LXDVifTestCase(test.NoDBTestCase):
gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
subnet_bridge_4 = network_model.Subnet(cidr='101.168.1.0/24',
dns=[dns_bridge_4],
gateway=gateway_bridge_4,
routes=None,
dhcp_server='191.168.1.1')
gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
dns=None,
gateway=gateway_bridge_6,
ips=None,
routes=None)
network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface='eth0',
vlan=99)
def setUp(self):
        super(LXDVifTestCase, self).setUp()
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)

View File

@ -11,4 +11,3 @@ oslo.utils>=1.2.0 # Apache-2.0
oslo.i18n>=1.3.0 # Apache-2.0
oslo.log
-e git://github.com/lxc/pylxd.git#egg=pylxd
py-cpuinfo

View File

@ -1,123 +0,0 @@
#!/usr/bin/env python
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to clean up old XenServer /var/lock/sm locks.
XenServer 5.6 and 6.0 do not appear to always cleanup locks when using a
FileSR. ext3 has a limit of 32K inode links, so when we have 32K-2 (31998)
locks laying around, builds will begin to fail because we can't create any
additional locks. This cleanup script is something we can run periodically as
a stop-gap measure until this is fixed upstream.
This script should be run on the dom0 of the affected machine.
"""
import errno
import optparse
import os
import sys
import time
BASE = '/var/lock/sm'
def _get_age_days(secs):
return float(time.time() - secs) / 86400
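# e.g. for a lockfile last modified three days ago:
#   _get_age_days(time.time() - 3 * 86400)  # -> ~3.0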
def _parse_args():
parser = optparse.OptionParser()
parser.add_option("-d", "--dry-run",
action="store_true", dest="dry_run", default=False,
help="don't actually remove locks")
parser.add_option("-l", "--limit",
action="store", type='int', dest="limit",
default=sys.maxint,
help="max number of locks to delete (default: no limit)")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="don't print status messages to stdout")
options, args = parser.parse_args()
try:
days_old = int(args[0])
except (IndexError, ValueError):
parser.print_help()
sys.exit(1)
return options, days_old
def main():
options, days_old = _parse_args()
if not os.path.exists(BASE):
print >> sys.stderr, "error: '%s' doesn't exist. Make sure you're"\
" running this on the dom0." % BASE
sys.exit(1)
lockpaths_removed = 0
nspaths_removed = 0
for nsname in os.listdir(BASE)[:options.limit]:
nspath = os.path.join(BASE, nsname)
if not os.path.isdir(nspath):
continue
# Remove old lockfiles
removed = 0
locknames = os.listdir(nspath)
for lockname in locknames:
lockpath = os.path.join(nspath, lockname)
lock_age_days = _get_age_days(os.path.getmtime(lockpath))
if lock_age_days > days_old:
lockpaths_removed += 1
removed += 1
if options.verbose:
print 'Removing old lock: %03d %s' % (lock_age_days,
lockpath)
if not options.dry_run:
os.unlink(lockpath)
# Remove empty namespace paths
if len(locknames) == removed:
nspaths_removed += 1
if options.verbose:
print 'Removing empty namespace: %s' % nspath
if not options.dry_run:
try:
os.rmdir(nspath)
except OSError, e:
if e.errno == errno.ENOTEMPTY:
print >> sys.stderr, "warning: directory '%s'"\
" not empty" % nspath
else:
raise
if options.dry_run:
print "** Dry Run **"
print "Total locks removed: ", lockpaths_removed
print "Total namespaces removed: ", nspaths_removed
if __name__ == '__main__':
main()

View File

@ -1,68 +0,0 @@
"""
destroy_cached_images.py
This script is used to clean up Glance images that are cached in the SR. By
default, this script will only clean up unused cached images.
Options:
--dry_run - Don't actually destroy the VDIs
--all_cached - Destroy all cached images instead of just unused cached
images.
"""
import eventlet
eventlet.monkey_patch()
import os
import sys
from oslo.config import cfg
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
from nova import config
from nova import utils
from nova.virt.xenapi import driver as xenapi_driver
from nova.virt.xenapi import vm_utils
destroy_opts = [
cfg.BoolOpt('all_cached',
default=False,
help='Destroy all cached images instead of just unused cached'
' images.'),
cfg.BoolOpt('dry_run',
default=False,
help='Don\'t actually delete the VDIs.')
]
CONF = cfg.CONF
CONF.register_cli_opts(destroy_opts)
def main():
config.parse_args(sys.argv)
utils.monkey_patch()
xenapi = xenapi_driver.XenAPIDriver()
session = xenapi._session
sr_ref = vm_utils.safe_find_sr(session)
destroyed = vm_utils.destroy_cached_images(
session, sr_ref, all_cached=CONF.all_cached,
dry_run=CONF.dry_run)
if '--verbose' in sys.argv:
print '\n'.join(destroyed)
print "Destroyed %d cached VDIs" % len(destroyed)
if __name__ == "__main__":
main()

View File

@ -1,103 +0,0 @@
#!/usr/bin/env python
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
One-time script to populate VDI.other_config.
We use metadata stored in VDI.other_config to associate a VDI with a given
instance so that we may safely cleanup orphaned VDIs.
We had a bug in the code that meant that the vast majority of VDIs created
would not have the other_config populated.
After deploying the fixed code, this script is intended to be run against all
compute-workers in a cluster so that existing VDIs can have their other_configs
populated.
Run on compute-worker (not Dom0):
python ./tools/xenserver/populate_other_config.py [--dry-run|--verbose]
"""
import os
import sys
possible_topdir = os.getcwd()
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
from nova import config
from nova.openstack.common import uuidutils
from nova.virt import virtapi
from nova.virt.xenapi import driver as xenapi_driver
from nova.virt.xenapi import vm_utils
from oslo.config import cfg
cli_opts = [
cfg.BoolOpt('dry-run',
default=False,
help='Whether to actually update other_config.'),
]
CONF = cfg.CONF
CONF.register_cli_opts(cli_opts)
def main():
config.parse_args(sys.argv)
xenapi = xenapi_driver.XenAPIDriver(virtapi.VirtAPI())
session = xenapi._session
vdi_refs = session.call_xenapi('VDI.get_all')
for vdi_ref in vdi_refs:
vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
other_config = vdi_rec['other_config']
# Already set...
if 'nova_instance_uuid' in other_config:
continue
name_label = vdi_rec['name_label']
# We only want name-labels of form instance-<UUID>-[optional-suffix]
if not name_label.startswith('instance-'):
continue
# Parse out UUID
instance_uuid = name_label.replace('instance-', '')[:36]
if not uuidutils.is_uuid_like(instance_uuid):
print "error: name label '%s' wasn't UUID-like" % name_label
continue
vdi_type = vdi_rec['name_description']
# We don't need a full instance record, just the UUID
instance = {'uuid': instance_uuid}
if not CONF.dry_run:
vm_utils._set_vdi_info(session, vdi_ref, vdi_type, name_label,
vdi_type, instance)
if CONF.verbose:
print "Setting other_config for instance_uuid=%s vdi_uuid=%s" % (
instance_uuid, vdi_rec['uuid'])
if CONF.dry_run:
print "Dry run completed"
if __name__ == "__main__":
main()

View File

@ -1,65 +0,0 @@
#!/bin/bash
set -eux
# Script to rotate console logs
#
# Should be run on Dom0, with cron, every minute:
# * * * * * /root/rotate_xen_guest_logs.sh
#
# Should clear out the guest logs on every boot
# because the domain ids may get re-used for a
# different tenant after the reboot
#
# /var/log/xen/guest should be mounted into a
# small loopback device to stop any guest being
# able to fill dom0 file system
log_dir="/var/log/xen/guest"
kb=1024
max_size_bytes=$(($kb*$kb))
truncated_size_bytes=$((5*$kb))
list_domains=/opt/xensource/bin/list_domains
log_file_base="${log_dir}/console."
tmp_file_base="${log_dir}/tmp.console."
# Ensure logging is setup correctly for all domains
xenstore-write /local/logconsole/@ "${log_file_base}%d"
# Move logs we want to keep
domains=$($list_domains | sed '/^id*/d' | sed 's/|.*|.*$//g' | xargs)
for i in $domains; do
log="${log_file_base}$i"
tmp="${tmp_file_base}$i"
mv $log $tmp || true
done
# Delete all console logs,
# mostly to remove logs from recently killed domains
rm -f ${log_dir}/console.*
# Reload domain list, in case it changed
# (note we may have just deleted a new console log)
domains=$($list_domains | sed '/^id*/d' | sed 's/|.*|.*$//g' | xargs)
for i in $domains; do
log="${log_file_base}$i"
tmp="${tmp_file_base}$i"
if [ -e "$tmp" ]; then
size=$(stat -c%s "$tmp")
# Trim the log if required
if [ "$size" -gt "$max_size_bytes" ]; then
tail -c $truncated_size_bytes $tmp > $log || true
else
mv $tmp $log || true
fi
fi
# Notify xen that it needs to reload the file
xenstore-write /local/logconsole/$i $log
xenstore-rm /local/logconsole/$i
done
# Delete all the tmp files
rm -f ${tmp_file_base}* || true

View File

@ -1,172 +0,0 @@
"""
This script concurrently builds and migrates instances. This can be useful when
troubleshooting race-conditions in virt-layer code.
Expects:
novarc to be sourced in the environment
Helper Script for Xen Dom0:
# cat /tmp/destroy_cache_vdis
#!/bin/bash
xe vdi-list | grep "Glance Image" -C1 | grep "^uuid" | awk '{print $5}' |
xargs -n1 -I{} xe vdi-destroy uuid={}
"""
import argparse
import contextlib
import multiprocessing
import subprocess
import sys
import time
DOM0_CLEANUP_SCRIPT = "/tmp/destroy_cache_vdis"
def run(cmd):
ret = subprocess.call(cmd, shell=True)
if ret != 0:
print >> sys.stderr, "Command exited non-zero: %s" % cmd
@contextlib.contextmanager
def server_built(server_name, image_name, flavor=1, cleanup=True):
run("nova boot --image=%(image_name)s --flavor=%(flavor)s"
" --poll %(server_name)s" % locals())
try:
yield
finally:
if cleanup:
run("nova delete %(server_name)s" % locals())
@contextlib.contextmanager
def snapshot_taken(server_name, snapshot_name, cleanup=True):
run("nova image-create %(server_name)s %(snapshot_name)s"
" --poll" % locals())
try:
yield
finally:
if cleanup:
run("nova image-delete %(snapshot_name)s" % locals())
def migrate_server(server_name):
run("nova migrate %(server_name)s --poll" % locals())
cmd = "nova list | grep %(server_name)s | awk '{print $6}'" % locals()
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
stdout, stderr = proc.communicate()
status = stdout.strip()
if status.upper() != 'VERIFY_RESIZE':
print >> sys.stderr, "Server %(server_name)s failed to rebuild"\
% locals()
return False
# Confirm the resize
run("nova resize-confirm %(server_name)s" % locals())
return True
def test_migrate(context):
count, args = context
server_name = "server%d" % count
cleanup = args.cleanup
with server_built(server_name, args.image, cleanup=cleanup):
# Migrate A -> B
result = migrate_server(server_name)
if not result:
return False
# Migrate B -> A
return migrate_server(server_name)
def rebuild_server(server_name, snapshot_name):
run("nova rebuild %(server_name)s %(snapshot_name)s --poll" % locals())
cmd = "nova list | grep %(server_name)s | awk '{print $6}'" % locals()
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
stdout, stderr = proc.communicate()
status = stdout.strip()
if status != 'ACTIVE':
print >> sys.stderr, "Server %(server_name)s failed to rebuild"\
% locals()
return False
return True
def test_rebuild(context):
count, args = context
server_name = "server%d" % count
snapshot_name = "snap%d" % count
cleanup = args.cleanup
with server_built(server_name, args.image, cleanup=cleanup):
with snapshot_taken(server_name, snapshot_name, cleanup=cleanup):
return rebuild_server(server_name, snapshot_name)
def _parse_args():
parser = argparse.ArgumentParser(
description='Test Nova for Race Conditions.')
parser.add_argument('tests', metavar='TESTS', type=str, nargs='*',
default=['rebuild', 'migrate'],
                        help='tests to run: [rebuild|migrate]')
parser.add_argument('-i', '--image', help="image to build from",
required=True)
parser.add_argument('-n', '--num-runs', type=int, help="number of runs",
default=1)
parser.add_argument('-c', '--concurrency', type=int, default=5,
help="number of concurrent processes")
parser.add_argument('--no-cleanup', action='store_false', dest="cleanup",
default=True)
parser.add_argument('-d', '--dom0-ips',
help="IP of dom0's to run cleanup script")
return parser.parse_args()
def main():
dom0_cleanup_script = DOM0_CLEANUP_SCRIPT
args = _parse_args()
if args.dom0_ips:
dom0_ips = args.dom0_ips.split(',')
else:
dom0_ips = []
start_time = time.time()
batch_size = min(args.num_runs, args.concurrency)
pool = multiprocessing.Pool(processes=args.concurrency)
results = []
for test in args.tests:
test_func = globals().get("test_%s" % test)
if not test_func:
print >> sys.stderr, "test '%s' not found" % test
sys.exit(1)
contexts = [(x, args) for x in range(args.num_runs)]
try:
results += pool.map(test_func, contexts)
finally:
if args.cleanup:
for dom0_ip in dom0_ips:
run("ssh root@%(dom0_ip)s %(dom0_cleanup_script)s"
% locals())
success = all(results)
result = "SUCCESS" if success else "FAILED"
duration = time.time() - start_time
print "%s, finished in %.2f secs" % (result, duration)
sys.exit(0 if success else 1)
if __name__ == "__main__":
main()

View File

@ -1,128 +0,0 @@
#!/usr/bin/env python
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is designed to clean up any VHDs (and their descendants) which have
a bad parent pointer.
The script needs to be run in the dom0 of the affected host.
The available actions are:
- print: display the filenames of the affected VHDs
- delete: remove the affected VHDs
- move: move the affected VHDs out of the SR into another directory
"""
import glob
import os
import subprocess
import sys
class ExecutionFailed(Exception):
def __init__(self, returncode, stdout, stderr, max_stream_length=32):
self.returncode = returncode
self.stdout = stdout[:max_stream_length]
self.stderr = stderr[:max_stream_length]
self.max_stream_length = max_stream_length
def __repr__(self):
return "<ExecutionFailed returncode=%s out='%s' stderr='%s'>" % (
self.returncode, self.stdout, self.stderr)
__str__ = __repr__
def execute(cmd, ok_exit_codes=None):
if ok_exit_codes is None:
ok_exit_codes = [0]
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode not in ok_exit_codes:
raise ExecutionFailed(proc.returncode, stdout, stderr)
return proc.returncode, stdout, stderr
def usage():
print "usage: %s <SR PATH> <print|delete|move>" % sys.argv[0]
sys.exit(1)
def main():
if len(sys.argv) < 3:
usage()
sr_path = sys.argv[1]
action = sys.argv[2]
if action not in ('print', 'delete', 'move'):
usage()
if action == 'move':
if len(sys.argv) < 4:
print "error: must specify where to move bad VHDs"
sys.exit(1)
bad_vhd_path = sys.argv[3]
if not os.path.exists(bad_vhd_path):
os.makedirs(bad_vhd_path)
bad_leaves = []
descendents = {}
for fname in glob.glob(os.path.join(sr_path, "*.vhd")):
(returncode, stdout, stderr) = execute(
['vhd-util', 'query', '-n', fname, '-p'], ok_exit_codes=[0, 22])
stdout = stdout.strip()
if stdout.endswith('.vhd'):
try:
descendents[stdout].append(fname)
except KeyError:
descendents[stdout] = [fname]
elif 'query failed' in stdout:
bad_leaves.append(fname)
def walk_vhds(root):
yield root
if root in descendents:
for child in descendents[root]:
for vhd in walk_vhds(child):
yield vhd
for bad_leaf in bad_leaves:
for bad_vhd in walk_vhds(bad_leaf):
print bad_vhd
if action == "print":
pass
elif action == "delete":
os.unlink(bad_vhd)
elif action == "move":
new_path = os.path.join(bad_vhd_path,
os.path.basename(bad_vhd))
os.rename(bad_vhd, new_path)
else:
raise Exception("invalid action %s" % action)
if __name__ == '__main__':
main()
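# walk_vhds() above is a simple pre-order walk of the descendents map;
# for an assumed chain a.vhd -> b.vhd -> c.vhd:
#
#   descendents = {'a.vhd': ['b.vhd'], 'b.vhd': ['c.vhd']}
#   list(walk_vhds('a.vhd'))  # -> ['a.vhd', 'b.vhd', 'c.vhd']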

View File

@ -1,329 +0,0 @@
#!/usr/bin/env python
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""vm_vdi_cleaner.py - List or clean orphaned VDIs/instances on XenServer."""
import doctest
import os
import sys
from oslo.config import cfg
import XenAPI
possible_topdir = os.getcwd()
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
from nova import config
from nova import context
from nova import db
from nova import exception
from oslo.utils import timeutils
from nova.virt import virtapi
from nova.virt.xenapi import driver as xenapi_driver
cleaner_opts = [
cfg.IntOpt('zombie_instance_updated_at_window',
default=172800,
help='Number of seconds zombie instances are cleaned up.'),
]
cli_opt = cfg.StrOpt('command',
help='Cleaner command')
CONF = cfg.CONF
CONF.register_opts(cleaner_opts)
CONF.register_cli_opt(cli_opt)
CONF.import_opt('verbose', 'nova.openstack.common.log')
CONF.import_opt("resize_confirm_window", "nova.compute.manager")
ALLOWED_COMMANDS = ["list-vdis", "clean-vdis", "list-instances",
"clean-instances", "test"]
def call_xenapi(xenapi, method, *args):
"""Make a call to xapi."""
return xenapi._session.call_xenapi(method, *args)
def find_orphaned_instances(xenapi):
"""Find and return a list of orphaned instances."""
ctxt = context.get_admin_context(read_deleted="only")
orphaned_instances = []
for vm_ref, vm_rec in _get_applicable_vm_recs(xenapi):
try:
uuid = vm_rec['other_config']['nova_uuid']
instance = db.instance_get_by_uuid(ctxt, uuid)
except (KeyError, exception.InstanceNotFound):
# NOTE(jk0): Err on the side of caution here. If we don't know
# anything about the particular instance, ignore it.
print_xen_object("INFO: Ignoring VM", vm_rec, indent_level=0)
continue
# NOTE(jk0): This would be triggered if a VM was deleted but the
# actual deletion process failed somewhere along the line.
is_active_and_deleting = (instance.vm_state == "active" and
instance.task_state == "deleting")
# NOTE(jk0): A zombie VM is an instance that is not active and hasn't
# been updated in over the specified period.
is_zombie_vm = (instance.vm_state != "active"
and timeutils.is_older_than(instance.updated_at,
CONF.zombie_instance_updated_at_window))
if is_active_and_deleting or is_zombie_vm:
orphaned_instances.append((vm_ref, vm_rec, instance))
return orphaned_instances
def cleanup_instance(xenapi, instance, vm_ref, vm_rec):
"""Delete orphaned instances."""
xenapi._vmops._destroy(instance, vm_ref)
def _get_applicable_vm_recs(xenapi):
"""An 'applicable' VM is one that is not a template and not the control
domain.
"""
for vm_ref in call_xenapi(xenapi, 'VM.get_all'):
try:
vm_rec = call_xenapi(xenapi, 'VM.get_record', vm_ref)
except XenAPI.Failure, e:
if e.details[0] != 'HANDLE_INVALID':
raise
continue
if vm_rec["is_a_template"] or vm_rec["is_control_domain"]:
continue
yield vm_ref, vm_rec
def print_xen_object(obj_type, obj, indent_level=0, spaces_per_indent=4):
"""Pretty-print a Xen object.
Looks like:
VM (abcd-abcd-abcd): 'name label here'
"""
if not CONF.verbose:
return
uuid = obj["uuid"]
try:
name_label = obj["name_label"]
except KeyError:
name_label = ""
msg = "%(obj_type)s (%(uuid)s) '%(name_label)s'" % locals()
indent = " " * spaces_per_indent * indent_level
print "".join([indent, msg])
def _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids):
"""Find VDIs which are connected to VBDs which are connected to VMs."""
def _is_null_ref(ref):
return ref == "OpaqueRef:NULL"
def _add_vdi_and_parents_to_connected(vdi_rec, indent_level):
indent_level += 1
vdi_and_parent_uuids = []
cur_vdi_rec = vdi_rec
while True:
cur_vdi_uuid = cur_vdi_rec["uuid"]
print_xen_object("VDI", vdi_rec, indent_level=indent_level)
connected_vdi_uuids.add(cur_vdi_uuid)
vdi_and_parent_uuids.append(cur_vdi_uuid)
try:
                parent_vdi_uuid = cur_vdi_rec["sm_config"]["vhd-parent"]
except KeyError:
parent_vdi_uuid = None
# NOTE(sirp): VDI's can have themselves as a parent?!
if parent_vdi_uuid and parent_vdi_uuid != cur_vdi_uuid:
indent_level += 1
cur_vdi_ref = call_xenapi(xenapi, 'VDI.get_by_uuid',
parent_vdi_uuid)
try:
cur_vdi_rec = call_xenapi(xenapi, 'VDI.get_record',
cur_vdi_ref)
except XenAPI.Failure, e:
if e.details[0] != 'HANDLE_INVALID':
raise
break
else:
break
for vm_ref, vm_rec in _get_applicable_vm_recs(xenapi):
indent_level = 0
print_xen_object("VM", vm_rec, indent_level=indent_level)
vbd_refs = vm_rec["VBDs"]
for vbd_ref in vbd_refs:
try:
vbd_rec = call_xenapi(xenapi, 'VBD.get_record', vbd_ref)
except XenAPI.Failure, e:
if e.details[0] != 'HANDLE_INVALID':
raise
continue
indent_level = 1
print_xen_object("VBD", vbd_rec, indent_level=indent_level)
vbd_vdi_ref = vbd_rec["VDI"]
if _is_null_ref(vbd_vdi_ref):
continue
try:
vdi_rec = call_xenapi(xenapi, 'VDI.get_record', vbd_vdi_ref)
except XenAPI.Failure, e:
if e.details[0] != 'HANDLE_INVALID':
raise
continue
_add_vdi_and_parents_to_connected(vdi_rec, indent_level)
def _find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, connected_vdi_uuids):
"""Collects all VDIs and adds system VDIs to the connected set."""
def _system_owned(vdi_rec):
vdi_name = vdi_rec["name_label"]
return (vdi_name.startswith("USB") or
vdi_name.endswith(".iso") or
vdi_rec["type"] == "system")
for vdi_ref in call_xenapi(xenapi, 'VDI.get_all'):
try:
vdi_rec = call_xenapi(xenapi, 'VDI.get_record', vdi_ref)
except XenAPI.Failure, e:
if e.details[0] != 'HANDLE_INVALID':
raise
continue
vdi_uuid = vdi_rec["uuid"]
all_vdi_uuids.add(vdi_uuid)
# System owned and non-managed VDIs should be considered 'connected'
# for our purposes.
if _system_owned(vdi_rec):
print_xen_object("SYSTEM VDI", vdi_rec, indent_level=0)
connected_vdi_uuids.add(vdi_uuid)
elif not vdi_rec["managed"]:
print_xen_object("UNMANAGED VDI", vdi_rec, indent_level=0)
connected_vdi_uuids.add(vdi_uuid)
def find_orphaned_vdi_uuids(xenapi):
"""Walk VM -> VBD -> VDI change and accumulate connected VDIs."""
connected_vdi_uuids = set()
_find_vdis_connected_to_vm(xenapi, connected_vdi_uuids)
all_vdi_uuids = set()
_find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, connected_vdi_uuids)
orphaned_vdi_uuids = all_vdi_uuids - connected_vdi_uuids
return orphaned_vdi_uuids
def list_orphaned_vdis(vdi_uuids):
"""List orphaned VDIs."""
for vdi_uuid in vdi_uuids:
if CONF.verbose:
print "ORPHANED VDI (%s)" % vdi_uuid
else:
print vdi_uuid
def clean_orphaned_vdis(xenapi, vdi_uuids):
"""Clean orphaned VDIs."""
for vdi_uuid in vdi_uuids:
if CONF.verbose:
print "CLEANING VDI (%s)" % vdi_uuid
vdi_ref = call_xenapi(xenapi, 'VDI.get_by_uuid', vdi_uuid)
try:
call_xenapi(xenapi, 'VDI.destroy', vdi_ref)
except XenAPI.Failure, exc:
print >> sys.stderr, "Skipping %s: %s" % (vdi_uuid, exc)
def list_orphaned_instances(orphaned_instances):
"""List orphaned instances."""
for vm_ref, vm_rec, orphaned_instance in orphaned_instances:
if CONF.verbose:
print "ORPHANED INSTANCE (%s)" % orphaned_instance.name
else:
print orphaned_instance.name
def clean_orphaned_instances(xenapi, orphaned_instances):
"""Clean orphaned instances."""
for vm_ref, vm_rec, instance in orphaned_instances:
if CONF.verbose:
print "CLEANING INSTANCE (%s)" % instance.name
cleanup_instance(xenapi, instance, vm_ref, vm_rec)
def main():
"""Main loop."""
config.parse_args(sys.argv)
args = CONF(args=sys.argv[1:], usage='%(prog)s [options] --command={' +
'|'.join(ALLOWED_COMMANDS) + '}')
command = CONF.command
if not command or command not in ALLOWED_COMMANDS:
CONF.print_usage()
sys.exit(1)
if CONF.zombie_instance_updated_at_window < CONF.resize_confirm_window:
raise Exception("`zombie_instance_updated_at_window` has to be longer"
" than `resize_confirm_window`.")
# NOTE(blamar) This tool does not require DB access, so passing in the
# 'abstract' VirtAPI class is acceptable
xenapi = xenapi_driver.XenAPIDriver(virtapi.VirtAPI())
if command == "list-vdis":
if CONF.verbose:
print "Connected VDIs:\n"
orphaned_vdi_uuids = find_orphaned_vdi_uuids(xenapi)
if CONF.verbose:
print "\nOrphaned VDIs:\n"
list_orphaned_vdis(orphaned_vdi_uuids)
elif command == "clean-vdis":
orphaned_vdi_uuids = find_orphaned_vdi_uuids(xenapi)
clean_orphaned_vdis(xenapi, orphaned_vdi_uuids)
elif command == "list-instances":
orphaned_instances = find_orphaned_instances(xenapi)
list_orphaned_instances(orphaned_instances)
elif command == "clean-instances":
orphaned_instances = find_orphaned_instances(xenapi)
clean_orphaned_instances(xenapi, orphaned_instances)
elif command == "test":
doctest.testmod()
else:
print "Unknown command '%s'" % command
sys.exit(1)
if __name__ == "__main__":
main()