Open for liberty

Signed-off-by: Chuck Short <chuck.short@canonical.com>
This commit is contained in:
Chuck Short 2015-06-07 19:04:40 -04:00
parent ee74894cf7
commit 270ba38816
19 changed files with 1410 additions and 886 deletions

View File

View File

@ -1,49 +0,0 @@
#!/usr/bin/python
import json
import optparse
import os
import subprocess
import tempfile
import time
def parse_argv():
    """Parse command-line options.

    Returns:
        (opts, args): ``opts.image`` is the validated path of the source
        image tarball.
    """
    optparser = optparse.OptionParser()
    optparser.add_option('-i', '--image',
                         help='Path to image', dest='image', metavar='PATH')
    (opts, args) = optparser.parse_args()
    # A missing -i used to crash with TypeError inside os.path.exists(None);
    # report a proper usage error instead.
    if not opts.image:
        optparser.error('No image path given (use -i/--image)')
    if not os.path.exists(opts.image):
        optparser.error('Unable to open file: %s' % opts.image)
    return (opts, args)
def create_tarball():
    """Repack the image at the module-level ``opts.image`` into an
    LXD-importable tarball (``<name>-lxd.tar.gz`` holding metadata.yaml
    and rootfs/).

    Relies on the global ``opts`` assigned in the __main__ guard.
    """
    workdir = tempfile.mkdtemp()
    rootfs_dir = os.path.join(workdir, 'rootfs')
    os.mkdir(rootfs_dir)
    image = opts.image
    # Unpack the source image; /dev entries are skipped deliberately.
    r = subprocess.call(['tar', '--anchored', '--numeric-owner',
                         '--exclude=dev/*', '-zxf', image,
                         '-C', rootfs_dir])
    epoch = time.time()
    metadata = {
        # Fixed misspelled key 'architecutre' -> 'architecture' so LXD
        # actually reads the field.
        'architecture': 'x86_64',
        'creation_date': int(epoch)
    }
    metadata_yaml = json.dumps(metadata, sort_keys=True,
                               indent=4, separators=(',', ': '),
                               ensure_ascii=False).encode('utf-8') + b"\n"
    metadata_file = os.path.join(workdir, 'metadata.yaml')
    # The payload is bytes: open in binary mode (text mode breaks on
    # Python 3) and close deterministically via the context manager.
    with open(metadata_file, 'wb') as fp:
        fp.write(metadata_yaml)
    # 'foo.tar.gz' -> 'foo-lxd.tar.gz' (everything before the first dot).
    source_tarball = image.split('.')
    dest_tarball = "%s-lxd.tar.gz" % source_tarball[0]
    r = subprocess.call(['tar', '-C', workdir, '-zcf',
                         dest_tarball, 'metadata.yaml', 'rootfs'])
if __name__ == '__main__':
    # Entry point: parse CLI options, then convert the image into an
    # LXD tarball. ``opts`` intentionally lands in module scope because
    # create_tarball() reads it as a global.
    (opts, args) = parse_argv()
    create_tarball()

View File

@ -1,3 +1,18 @@
from nclxd.nova.virt.lxd import driver
# Copyright 2015 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.lxd import driver
LXDDriver = driver.LXDDriver

View File

@ -1,197 +0,0 @@
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import json
import socket
class UnixHTTPConnection(httplib.HTTPConnection):
    """HTTPConnection variant that speaks HTTP over a Unix domain socket.

    Python 2 only (uses ``httplib``). ``path`` is the filesystem path of
    the socket; ``host``/``port`` exist solely to satisfy the parent
    constructor and are never dialled.
    """

    def __init__(self, path, host='localhost', port=None, strict=None,
                 timeout=None):
        httplib.HTTPConnection.__init__(self, host, port=port,
                                        strict=strict,
                                        timeout=timeout)
        # Filesystem path of the Unix socket (not an HTTP URL path).
        self.path = path

    def connect(self):
        # Override the TCP connect: dial the Unix socket instead.
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(self.path)
        self.sock = sock
class Client(object):
    """Thin client for the local LXD REST API over its Unix socket.

    Every call opens a fresh UnixHTTPConnection, issues one request and
    decodes the JSON response. Most methods return ``(status, data)``
    where ``status`` is the HTTP status code and ``data`` the decoded
    JSON body.
    """

    def __init__(self):
        # Default location of the LXD daemon's control socket.
        self.unix_socket = '/var/lib/lxd/unix.socket'

    def _make_request(self, *args, **kwargs):
        """Issue one HTTP request; return (http-status, decoded JSON)."""
        conn = UnixHTTPConnection(self.unix_socket)
        conn.request(*args, **kwargs)
        response = conn.getresponse()
        data = json.loads(response.read())
        return response.status, data

    # host ping
    def ping(self):
        """GET /1.0 — basic liveness check of the daemon."""
        (status, data) = self._make_request('GET', '/1.0')
        return (status, data)

    # containers
    def container_list(self):
        """Return the names of all containers."""
        (status, data) = self._make_request('GET', '/1.0/containers')
        return [container.split('/1.0/containers/')[-1]
                for container in data['metadata']]

    def container_info(self, container):
        """Return raw (status, data) for one container."""
        (status, data) = self._make_request('GET', '/1.0/containers/%s'
                                            % container)
        return (status, data)

    def container_defined(self, name):
        """True unless LXD reports an error for container *name*."""
        (status, data) = self._make_request('GET', '/1.0/containers/%s' % name)
        container_defined = True
        if data.get('type') == 'error':
            container_defined = False
        return container_defined

    def container_running(self, name):
        """True when the container's reported status is RUNNING."""
        container_running = False
        (status, data) = self._make_request('GET', '/1.0/containers/%s' % name)
        metadata = data.get('metadata')
        if metadata.get('status') == 'RUNNING':
            container_running = True
        return container_running

    def container_init(self, config):
        """Create a container from the *config* dict."""
        (status, data) = self._make_request('POST', '/1.0/containers',
                                            json.dumps(config))
        return (status, data)

    def container_start(self, name):
        """Force-start *name* (30 second timeout)."""
        action = {'action': 'start', 'timeout': 30, 'force': True}
        (status, data) = self._make_request('PUT', '/1.0/containers/%s/state'
                                            % name, json.dumps(action))
        return (status, data)

    def container_restart(self, name):
        """Force-restart *name* (30 second timeout)."""
        action = {'action': 'restart', 'timeout': 30, 'force': True}
        (status, data) = self._make_request('PUT', '/1.0/containers/%s/state'
                                            % name, json.dumps(action))
        return (status, data)

    def container_stop(self, name):
        """Force-stop *name* (30 second timeout)."""
        action = {'action': 'stop', 'timeout': 30, 'force': True}
        (status, data) = self._make_request('PUT', '/1.0/containers/%s/state'
                                            % name, json.dumps(action))
        return (status, data)

    def container_suspend(self, name):
        """Freeze *name* (30 second timeout)."""
        action = {'action': 'freeze', 'timeout': 30, 'force': True}
        (status, data) = self._make_request('PUT', '/1.0/containers/%s/state'
                                            % name, json.dumps(action))
        return (status, data)

    def container_resume(self, name):
        """Unfreeze *name* (30 second timeout)."""
        action = {'action': 'unfreeze', 'timeout': 30, 'force': True}
        (status, data) = self._make_request('PUT', '/1.0/containers/%s/state'
                                            % name, json.dumps(action))
        return (status, data)

    def container_delete(self, name):
        """Delete container *name*."""
        (status, data) = self._make_request('DELETE', '/1.0/containers/%s'
                                            % name)
        return (status, data)

    def container_update(self, name, config):
        """Replace the configuration of container *name*."""
        (status, data) = self._make_request('PUT', '/1.0/containers/%s'
                                            % name, json.dumps(config))
        return (status, data)

    # profiles
    def profile_list(self):
        """Return the names of all profiles."""
        (status, data) = self._make_request('GET', '/1.0/profiles')
        return [profile.split('/1.0/profiles/')[-1]
                for profile in data['metadata']]

    def profile_defined(self, name):
        """True unless LXD reports an error for profile *name*."""
        (status, data) = self._make_request('GET', '/1.0/profiles/%s' % name)
        profile_defined = True
        if data.get('type') == 'error':
            profile_defined = False
        return profile_defined

    def profile_create(self, config):
        """Create a profile from the *config* dict."""
        (status, data) = self._make_request('POST', '/1.0/profiles',
                                            json.dumps(config))
        return (status, data)

    def profile_update(self, name, config):
        """Replace the configuration of profile *name*."""
        (status, data) = self._make_request('PUT', '/1.0/profiles/%s' % name,
                                            json.dumps(config))
        return (status, data)

    def profile_show(self, name):
        """Return a summary dict (status, config, devices) for *name*."""
        (status, data) = self._make_request('GET', '/1.0/profiles/%s' % name)
        container_profile = data.get('metadata')
        return {'status': data.get('status'),
                'status_code': data.get('status_code'),
                'profile': str(container_profile.get('config', 'None')),
                'devices': str(container_profile.get('devices', 'None'))}

    # images
    def image_list(self):
        """Return the fingerprints/names of all images."""
        (status, data) = self._make_request('GET', '/1.0/images')
        return [image.split('/1.0/images/')[-1] for image in data['metadata']]

    def image_upload(self, path, filename):
        """POST the file at *path* to /1.0/images (*filename* is unused)."""
        # Use a context manager so the file handle is not leaked; the
        # request is issued while the file is still open.
        with open(path, 'rb') as image_file:
            (status, data) = self._make_request('POST', '/1.0/images',
                                                image_file)
        return (status, data)

    def image_delete(self, name):
        """Delete image *name*."""
        (status, data) = self._make_request('DELETE', '/1.0/images/%s' % name)
        return (status, data)

    def image_export(self, name):
        """Not implemented."""
        # Fixed: 'raise NotImplemented()' raised TypeError because
        # NotImplemented is not callable; NotImplementedError is intended.
        raise NotImplementedError()

    # aliases
    def alias_list(self):
        """Return image alias names."""
        (status, data) = self._make_request('GET', '/1.0/images/aliases')
        # NOTE(review): entries come from /1.0/images/aliases but are split
        # on '/1.0/aliases/' — verify the key against the actual API URLs.
        return [alias.split('/1.0/aliases/')[-1] for alias in data['metadata']]

    def alias_create(self, name, target):
        """Create alias *name* pointing at image *target*."""
        payload = {'target': target, 'name': name}
        (status, data) = self._make_request(
            'POST', '/1.0/images/aliases', json.dumps(payload))
        return (status, data)

    def alias_delete(self, name):
        """Delete alias *name*."""
        (status, data) = self._make_request(
            'DELETE', '/1.0/images/aliases/%s' % name)
        return (status, data)

    # operations
    def operation_list(self):
        """Return ids of currently running operations."""
        (status, data) = self._make_request('GET', '/1.0/operations')
        return [operation.split('/1.0/operations/')[-1]
                for operation in data['metadata']['running']]

    def operation_show(self, oid):
        """Return raw (status, data) for operation *oid*."""
        (status, data) = self._make_request('GET', '/1.0/operations/%s' % oid)
        return (status, data)

View File

@ -1,4 +1,9 @@
# Copyright (c) 2015 Canonical Ltd
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -12,25 +17,26 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
import pwd
from oslo.config import cfg
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from nova.openstack.common import loopingcall
from nova.i18n import _, _LW, _LE, _LI
from nova import utils
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.compute import power_state
from nova import exception
from nova import utils
import image
import profile
import vif
import container_utils
from . import vif
from . import images
from . import utils as container_utils
CONF = cfg.CONF
CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver')
@ -43,245 +49,244 @@ MAX_CONSOLE_BYTES = 100 * units.Ki
LXD_POWER_STATES = {
'RUNNING': power_state.RUNNING,
'STOPPED': power_state.SHUTDOWN,
'STARTING': power_state.BUILDING,
'STARTING': power_state.NOSTATE,
'STOPPING': power_state.SHUTDOWN,
'ABORTING': power_state.CRASHED,
'FREEZING': power_state.PAUSED,
'FROZEN': power_state.SUSPENDED,
'THAWED': power_state.PAUSED,
'PENDING': power_state.BUILDING,
'PENDING': power_state.NOSTATE,
'UNKNOWN': power_state.NOSTATE
}
class Container(object):
def __init__(self, client, virtapi):
self.client = client
def __init__(self, lxd, virtapi):
self.lxd = lxd
self.virtapi = virtapi
self.idmap = container_utils.LXCUserIdMap()
self.image = images.ContainerImage(self.client,
self.idmap)
self.image_driver = image.load_driver(CONF.lxd.lxd_image_type,
self.lxd)
self.profile = profile.LXDProfile(self.lxd)
self.vif_driver = vif.LXDGenericDriver()
def init_host(self):
(status, resp) = self.client.ping()
if resp['status'] != 'Success':
msg = _('LXD is not available')
raise exception.HypervisorUnavailable(msg)
def container_rebuild(self, context, instance, image_meta, injected_files,
admin_password, bdms, detach_block_devices,
attach_block_devices, network_info, recreate,
block_device_info,
preserve_ephemeral):
raise NotImplemented()
def container_start(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
flavor=None):
LOG.info(_LI('Spawning new instance'), instance=instance)
if self.client.container_defined(instance.uuid):
raise exception.InstanceExists(name=instance.uuid)
admin_password, network_info, block_device_info):
try:
LOG.debug('Fetching image from Glance.')
self.image.fetch_image(context, instance, image_meta)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create image for: %(instance)s'),
{'instance': instance.uuid})
self.container_destroy(context, instance, network_info,
block_device_info,
destroy_disks=None, migrate_data=None)
LOG.info(_LI('Starting container'), instance=instance)
if self.lxd.container_defined(instance.uuid):
raise exception.InstanceExists(name=instance.uuid)
try:
LOG.debug('Setting up container profiles')
self.setup_container(instance, network_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to setup container for: %(instance)s'),
{'instance': instance.uuid})
self.container_destroy(context, instance, network_info,
block_device_info,
destroy_disks=None, migrate_data=None)
try:
LOG.debug('Setup Networking')
self._start_network(instance, network_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to setup container for: %(instance)s'),
{'instance': instance.uuid})
self.container_destroy(context, instance, network_info,
block_device_info,
destroy_disks=None, migrate_data=None)
try:
LOG.debug('Start container')
self.image_driver.setup_container(context, instance, image_meta)
self.profile.profile_create(instance, network_info)
self._setup_container(instance)
self._start_container(instance, network_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to setup container for: %(instance)s'),
{'instance': instance.uuid})
self.container_destroy(context, instance, network_info,
block_device_info,
destroy_disks=None, migrate_data=None)
block_device_info, destroy_disks=None,
migrate_data=None)
def _wait_for_boot():
state = self.container_info(instance)
if state == power_state.RUNNING:
LOG.info(_LI("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.6).wait()
def setup_container(self, instance, network_info):
    """Create (but do not start) the LXD container for *instance*.

    Ensures a per-instance profile exists first, then registers the
    container against its unpacked rootfs. Raises NovaException when
    LXD rejects the creation.
    """
    if self.client.container_defined(instance.uuid):
        return
    if not self.client.profile_defined(instance.uuid):
        self._create_container_profile(instance, network_info)
    container_rootfs = self._get_container_rootfs(instance)
    container = {'name': instance.uuid,
                 'ephemeral': True,
                 'profiles': ['%s' % instance.uuid],
                 'source': {'type': 'none', 'path': container_rootfs}}
    if network_info:
        network_devices = self._get_container_devices(network_info)
        container['devices'] = network_devices
    (status, resp) = self.client.container_init(container)
    # Removed leftover debug statement 'print resp' (also a Python 3
    # syntax error).
    if resp.get('status') != 'OK':
        msg = _('Failed to setup container: %(instance)s - %(reason)s') % \
            {'instance': instance.uuid, 'reason': resp.get('metadata')}
        raise exception.NovaException(msg)
def _create_container_profile(self, instance, network_info):
    """Create a per-instance LXD profile wiring the console log and NICs."""
    console_log = self._get_console_path(instance)
    profile = {'name': instance.uuid,
               'config': {'raw.lxc': 'lxc.console.logfile=%s\n' % console_log},}
    if network_info:
        network_devices = self._get_container_devices(network_info)
        profile['devices'] = network_devices
    (status, resp) = self.client.profile_create(profile)
    # NOTE(review): sibling methods check for status 'OK' while this one
    # checks 'Success' — verify which the API actually returns here.
    if resp.get('status') != 'Success':
        msg = _('Failed to create profile: %(instance)s - %(reason)s') % \
            {'instance': instance.uuid, 'reason': resp.get('metadata')}
        raise exception.NovaException(msg)
def container_restart(self, context, instance, network_info, reboot_type,
                      block_device_info=None, bad_volumes_callback=None):
    """Restart the container backing *instance*; raise on API failure."""
    # Fixed: 'container_defind' was an AttributeError at runtime, and the
    # guard was inverted — bail out only when the container does NOT
    # exist, matching container_destroy's pattern.
    if not self.client.container_defined(instance.uuid):
        return
    (status, resp) = self.client.container_restart(instance.uuid)
    if resp.get('status') != 'OK':
        msg = _('Container restart failed: %(instance)s - %(reason)s') % \
            {'instance': instance.uuid, 'reason': resp.get('metadata')}
        raise exception.NovaException(msg)
def container_power_on(self, instance, shutdown_timeout=0, shutdown_attempts=0):
    """Start the container backing *instance*; raise on API failure."""
    # Fixed: 'container_defind' typo (AttributeError) and the inverted
    # guard — skip only when the container does NOT exist.
    if not self.client.container_defined(instance.uuid):
        return
    (status, resp) = self.client.container_start(instance.uuid)
    if resp.get('status') != 'OK':
        msg = _('Container power on failed: %(instance)s - %(reason)s') % \
            {'instance': instance.uuid, 'reason': resp.get('metadata')}
        raise exception.NovaException(msg)
def container_power_off(self, instance):
    """Stop the container backing *instance*; raise on API failure."""
    # Fixed inverted guard: 'if defined: return' skipped every existing
    # container; bail out only when it does NOT exist.
    if not self.client.container_defined(instance.uuid):
        return
    (status, resp) = self.client.container_stop(instance.uuid)
    if resp.get('status') != 'OK':
        msg = _('Container power off failed: %(instance)s - %(reason)s') % \
            {'instance': instance.uuid, 'reason': resp.get('metadata')}
        raise exception.NovaException(msg)
def container_suspend(self, instance):
    """Freeze the container backing *instance*; raise on API failure."""
    # Fixed: 'container_defind' typo (AttributeError) and the inverted
    # guard — skip only when the container does NOT exist.
    if not self.client.container_defined(instance.uuid):
        return
    (status, resp) = self.client.container_suspend(instance.uuid)
    if resp.get('status') != 'OK':
        msg = _('Container suspend failed: %(instance)s - %(reason)s') % \
            {'instance': instance.uuid, 'reason': resp.get('metadata')}
        raise exception.NovaException(msg)
def container_resume(self, context, instance, network_info, block_device_info=None):
    """Unfreeze the container backing *instance*; raise on API failure."""
    # Fixed: 'container_defind' typo (AttributeError) and the inverted
    # guard — skip only when the container does NOT exist.
    if not self.client.container_defined(instance.uuid):
        return
    (status, resp) = self.client.container_resume(instance.uuid)
    if resp.get('status') != 'OK':
        msg = _('Container resume failed: %(instance)s - %(reason)s') % \
            {'instance': instance.uuid, 'reason': resp.get('metadata')}
        raise exception.NovaException(msg)
def container_destroy(
        self, context, instance, network_info, block_device_info,
        destroy_disks, migrate_data):
    """Delete the container and wait for the async delete to finish."""
    # Nothing to do when the container was never created.
    if not self.client.container_defined(instance.uuid):
        return
    (status, resp) = self.client.container_delete(instance.uuid)
    if resp.get('status') != 'OK':
        msg = _('Container destroy failed: %(instance)s - %(reason)s') % \
            {'instance': instance.uuid, 'reason': resp.get('metadata')}
        raise exception.NovaException(msg)
    # The delete is async: poll the operation id taken from the operation
    # URL ('/1.0/operations/<id>' -> component index 3).
    oid = resp.get('operation').split('/')[3]
    if not oid:
        msg = _('Unable to determine resource id')
        raise exception.NovaException(msg)
    timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_operation,
                                                 oid)
    timer.start(interval=0.6).wait()
    self.cleanup_container(instance, network_info)
def get_console_log(self, instance):
    """Return up to MAX_CONSOLE_BYTES from the instance's console log."""
    # Fixed inverted guard: only bail out when the container does NOT
    # exist ('if defined: return' skipped every real instance).
    if not self.client.container_defined(instance.uuid):
        return
    console_dir = os.path.join(CONF.lxd.lxd_root_dir, instance.uuid)
    console_log = self._get_console_path(instance)
    # The log is root-owned inside the LXD tree; chown/chmod so the
    # nova user can read it.
    uid = pwd.getpwuid(os.getuid()).pw_uid
    utils.execute('chown', '%s:%s' %
                  (uid, uid), console_log, run_as_root=True)
    utils.execute('chmod', '755', console_dir, run_as_root=True)
    with open(console_log, 'rb') as fp:
        log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
        if remaining > 0:
            LOG.info(_('Truncated console log returned, '
                       '%d bytes ignored'),
                     remaining, instance=instance)
    return log_data
def container_info(self, instance):
def container_destroy(self, context, instance, network_info,
block_device_info, destroy_disks, migrate_data):
LOG.info(_LI('Destroying container'))
try:
(status, resp) = self.client.container_info(instance.uuid)
metadata = resp.get('metadata')
container_state = metadata['status']['status']
if not self.lxd.container_defined(instance.uuid):
return
self.lxd.container_destroy(instance.uuid)
self.container_cleanup(context, instance, network_info,
block_device_info, destroy_disks=None,
migrate_data=None)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unable to destroy instance: %s ') % ex)
def container_reboot(self, context, instance, network_info, reboot_type,
                     block_device_info=None, bad_volumes_callback=None):
    """Reboot the container via pylxd (20s timeout); re-raise on failure."""
    try:
        if not self.lxd.container_defined(instance.uuid):
            msg = _('Container does not exist')
            raise exception.NovaException(msg)
        return self.lxd.container_reboot(instance.uuid, 20)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            # NOTE(review): the message says 'destroy' but this method
            # reboots — likely a copy/paste; verify before trusting logs.
            LOG.exception(_LE('Unable to destroy instance: %s ') % ex)
def get_console_output(self, context, instance):
    """Best-effort read of the container's console log.

    Returns the last MAX_CONSOLE_BYTES of the log, or '' on any failure
    (the broad except is a deliberate best-effort fallback).
    """
    try:
        if not self.lxd.container_defined(instance.uuid):
            msg = _('Container does not exist')
            raise exception.NovaException(msg)
        console_log = container_utils.get_console_path(instance)
        # Make the root-owned log readable by the nova user.
        uid = pwd.getpwuid(os.getuid()).pw_uid
        utils.execute('chown', '%s:%s' % (uid, uid),
                      console_log, run_as_root=True)
        utils.execute('chmod', '755',
                      container_utils.get_container_dir(instance),
                      run_as_root=True)
        with open(console_log , 'rb') as fp:
            # 'remaning' (sic) holds the truncated-byte count; unused here.
            log_data, remaning = utils.last_bytes(fp,
                                                  MAX_CONSOLE_BYTES)
        return log_data
    except Exception as ex:
        LOG.exception(_LE('Failed container: %s') % ex)
        return ""
def container_cleanup(self, context, instance, network_info,
                      block_device_info, destroy_disks, migrate_data,
                      destroy_vifs=True):
    """Remove the instance's LXD profile and unplug its VIFs.

    destroy_disks/migrate_data/destroy_vifs are accepted for interface
    compatibility but not used here.
    """
    LOG.info(_LI('Cleaning up container'))
    try:
        self.profile.profile_delete(instance)
        self.unplug_vifs(instance, network_info)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            # Fixed typo: 'LOG.excpetion' raised AttributeError and
            # masked the original failure.
            LOG.exception(_LE('Unable to clean up instance: %s') % ex)
def container_state(self, instance):
    """Map the container's LXD state to a nova power_state constant.

    Any failure (missing container, daemon error, unknown state key)
    degrades to power_state.NOSTATE.
    """
    try:
        container_state = self.lxd.container_state(instance.uuid)
        state = LXD_POWER_STATES[container_state]
    except Exception:
        state = power_state.NOSTATE
    return state
def container_pause(self, instance):
    """Pause is not supported by this driver."""
    raise NotImplementedError()

def container_unpause(self, instance):
    """Unpause is not supported by this driver."""
    raise NotImplementedError()
def container_suspend(self, context, instance):
    """Freeze the container (20s timeout); re-raise on failure."""
    try:
        if not self.lxd.container_defined(instance.uuid):
            msg = _("Container is not defined")
            raise exception.NovaException(msg)
        self.lxd.container_suspend(instance.uuid, 20)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Unable to suspend container"))
def container_resume(self, context, instance, network_info,
                     block_device_info=None):
    """Unfreeze the container (20s timeout); re-raise on failure."""
    try:
        if not self.lxd.container_defined(instance.uuid):
            msg = _('Container does not exist.')
            raise exception.NovaException(msg)
        self.lxd.container_resume(instance.uuid, 20)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Unable to resume container"))
def container_rescue(self, context, instance, network_info, image_meta,
                     rescue_password):
    """Rescue is not supported by this driver."""
    raise NotImplementedError()

def container_unrescue(self, instance, network_info):
    """Unrescue is not supported by this driver."""
    raise NotImplementedError()
def container_power_off(self, instance, timeout=0, retry_interval=0):
    """Stop the container backing *instance* (20s force timeout)."""
    try:
        if not self.lxd.container_defined(instance.uuid):
            msg = _('Container is not defined')
            raise exception.NovaException(msg)
        self.lxd.container_stop(instance.uuid, 20)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Fixed typo: 'LOG.execption' raised AttributeError.
            LOG.exception(_LE("Unable to power off container"))
    # Dropped the stray trailing 'raise NotImplementedError()' that made
    # every successful power-off raise.
def container_power_on(self, context, instance, network_info,
                       block_device_info):
    """Start the container backing *instance* (20s timeout)."""
    try:
        if not self.lxd.container_defined(instance.uuid):
            msg = _('Container is not defined')
            raise exception.NovaException(msg)
        self.lxd.container_start(instance.uuid, 20)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Fixed log-message typo ('conatainer').
            LOG.exception(_LE("Unable to power on container"))
def container_soft_delete(self, instance):
    """Soft delete is not supported."""
    raise NotImplementedError()

def container_restore(self, instance):
    """Restore is not supported."""
    raise NotImplementedError()

def container_get_resource(self, nodename):
    """Resource reporting is not implemented here."""
    raise NotImplementedError()

def container_inject_file(self, instance, b64_path, b64_contents):
    """File injection is not supported."""
    raise NotImplementedError()

def container_inject_network_info(self, instance, nw_info):
    """Deliberate no-op: network info injection is not needed."""
    pass

def container_poll_rebooting_instances(self, timeout, instances):
    """Not supported."""
    raise NotImplementedError()

def container_attach_interface(self, instance, image_meta, vif):
    """Interface hot-plug is not supported."""
    raise NotImplementedError()

def container_detach_interface(self, instance, vif):
    """Interface hot-unplug is not supported."""
    raise NotImplementedError()

def container_snapshot(self, context, instance, image_id,
                       update_task_state):
    """Snapshot is not supported."""
    raise NotImplementedError()

def post_interrupted_snapshot_cleanup(self, context, instance):
    """Deliberate no-op: nothing to clean up after a failed snapshot."""
    pass

def container_quiesce(self, context, instance, image_meta):
    """Quiesce is not supported."""
    raise NotImplementedError()

def container_unquiesce(self, context, instance, image_meta):
    """Unquiesce is not supported."""
    raise NotImplementedError()
def _setup_container(self, instance):
    """Register a new LXD container for *instance* from its cached image.

    Raises NovaException when the image tarball is missing; waits for the
    async create operation to complete.
    """
    LOG.debug('Setting up container')
    if not os.path.exists(
            container_utils.get_container_image(instance)):
        msg = _('Container image doesnt exist.')
        raise exception.NovaException(msg)
    if instance.uuid:
        container = {}
        container['name'] = instance.uuid
        container['profiles'] = ['%s' % instance.uuid]
        container['source'] = {
            'type': 'image',
            'alias': instance.image_ref
        }
        (state, data) = self.lxd.container_init(container)
        # Operation URL is '/1.0/operations/<id>' -> component index 3.
        self._wait_for_container(data.get('operation').split('/')[3])
def _start_container(self, instance, network_info):
timeout = CONF.vif_plugging_timeout
# check to see if neutron is ready before
# doing anything else
if (not self.client.container_running(instance.uuid) and
if (not self.lxd.container_running(instance.uuid) and
utils.is_neutron() and timeout):
events = self._get_neutron_events(network_info)
else:
@ -289,46 +294,40 @@ class Container(object):
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self._start_network(instance, network_info)
self.plug_vifs(instance, network_info)
except exception.VirtualInterfaceCreateException:
LOG.info(_LW('Failed'))
LOG.info(_LW('Failed to connect networking to instance'))
try:
(status, resp) = self.client.container_start(instance.uuid)
if resp.get('status') != 'OK':
raise exception.NovaException
except Exception as e:
LOG.debug(_('Failed to container instance: %s') %
resp.get('metadata'))
msg = _('Cannot container container: {0}')
raise exception.NovaException(msg.format(e),
instance_id=instance.name)
(state, data) = self.lxd.container_start(instance.uuid, 20)
self._wait_for_container(data.get('operation').split('/')[3])
def cleanup_container(self, instance, network_info):
    """Tear down networking and unmount the container rootfs.

    The unmount is best-effort: failures are logged, not raised.
    """
    self._teardown_network(instance, network_info)
    try:
        rootfs = self._get_container_rootfs(instance)
        utils.execute('umount', rootfs,
                      attempts=3, delay_on_retry=True,
                      run_as_root=True)
    except Exception as exc:
        # Deliberately non-fatal so the rest of cleanup proceeds.
        LOG.exception(_LE("Couldn't unmount the share %s"),
                      exc)
def _destroy_container(self, context, instance, network_info,
block_device_info,
destroy_disks, migrate_data):
if self.lxd.container_defined(instance.uuid):
msg = _('Unable to find container')
raise exception.NovaException(msg)
def _start_network(self, instance, network_info):
for vif in network_info:
self.vif_driver.plug(instance, vif)
self.lxd.container_destroy(instance.uuid)
def _teardown_network(self, instance, network_info):
    """Unplug every VIF of *instance*."""
    for vif in network_info:
        self.vif_driver.unplug(instance, vif)

def plug_vifs(self, instance, network_info):
    """Plug every VIF of *instance*."""
    for _vif in network_info:
        self.vif_driver.plug(instance, _vif)

def _wait_for_operation(self, oid):
    """LoopingCall target: done when *oid* leaves the running list."""
    # NOTE(review): uses self.client while sibling methods use self.lxd —
    # verify which API object this class is constructed with.
    containers = self.client.operation_list()
    if oid not in containers:
        raise loopingcall.LoopingCallDone()

def unplug_vifs(self, instance, network_info):
    """Unplug every VIF of *instance*."""
    for _vif in network_info:
        self.vif_driver.unplug(instance, _vif)

def _wait_for_container(self, oid):
    """Block until LXD operation *oid* reports code 200 (20s timeout)."""
    if not oid:
        msg = _('Unable to determine container operation')
        raise exception.NovaException(msg)
    if not self.lxd.wait_container_operation(oid, 200, 20):
        msg = _('Container creation timed out')
        raise exception.NovaException(msg)
def _get_neutron_events(self, network_info):
return [('network-vif-plugged', vif['id'])
@ -340,24 +339,3 @@ class Container(object):
{'event': event_name, 'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_container_rootfs(self, instance):
    """Path of the container's rootfs under the LXD root dir."""
    return os.path.join(CONF.lxd.lxd_root_dir, instance.uuid, 'rootfs')

def _get_console_path(self, instance):
    """Path of the container's console log file."""
    return os.path.join(CONF.lxd.lxd_root_dir, instance.uuid, 'console.log')

def _get_container_devices(self, network_info):
    """Build the LXD NIC device mapping from *network_info*.

    NOTE(review): iterates every VIF but returns a single 'eth0' entry
    built from the LAST one — multi-NIC instances lose interfaces.
    """
    for vif in network_info:
        vif_id = vif['id'][:11]
        vif_type = vif['type']
        bridge = vif['network']['bridge']
        mac = vif['address']
        # OVS ports are wired through the qbr hybrid bridge.
        if vif_type == 'ovs':
            bridge = 'qbr%s' % vif_id
    return {'eth0': {'nictype': 'bridged',
                     'hwaddr': mac,
                     'parent': bridge,
                     'type': 'nic'}}

View File

@ -0,0 +1,63 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _LE
from nova.virt import images
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def get_base_dir():
    """Return the directory where downloaded container images are cached."""
    cache_subdir = CONF.image_cache_subdirectory_name
    instances_root = CONF.instances_path
    return os.path.join(instances_root, cache_subdir)
def get_container_image(instance):
    """Return the cached tarball path for *instance*'s image."""
    tarball_name = '%s.tar.gz' % instance.image_ref
    return os.path.join(get_base_dir(), tarball_name)
def fetch_image(context, image, instance, max_size=0):
    """Download the instance's glance image to the local path *image*.

    Failures are logged and swallowed — callers receive no exception.
    """
    try:
        images.fetch(context, instance.image_ref, image,
                     instance.user_id, instance.project_id,
                     max_size=max_size)
    except Exception:
        # NOTE(review): the message reads truncated ('...anymore on') —
        # probably meant '...on the image service'; verify upstream.
        LOG.exception(_LE("Image %(image_id)s doesn't exist anymore on"),
                      {'image_id': instance.image_ref})
def get_console_path(instance):
    """Return the console log path for *instance* under the LXD root."""
    return os.path.join(
        CONF.lxd.lxd_root_dir, 'lxc', instance.uuid, 'console.log')
def get_container_dir(instance):
    """Return the LXD container directory for *instance*."""
    lxd_root = CONF.lxd.lxd_root_dir
    return os.path.join(lxd_root, 'lxc', instance.uuid)

View File

@ -1,8 +1,9 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2015 Canonical Ltd.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -17,224 +18,421 @@
# under the License.
"""
Nova LXD Driver
LXD Driver
"""
import socket
import multiprocessing
from oslo.utils import units
from oslo.config import cfg
from oslo_config import cfg
from oslo_log import log as logging
from oslo.serialization import jsonutils
import client
from pylxd import api
from nova.i18n import _
from nova import exception
from nova.i18n import _, _LE
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import hardware
import container
import host_utils
import host
import migration
lxd_opts = [
cfg.StrOpt('lxd_socket',
default='/var/lib/lxd/unix.socket',
help='Default LXD unix socket'),
cfg.StrOpt('lxd_root_dir',
default='/var/lib/lxd/lxc',
help='Default LXD directory')
default='/var/lib/lxd/',
help='Default LXD directory'),
cfg.StrOpt('lxd_image_type',
default='nova.virt.lxd.image.DefaultContainerImage',
help='Default image')
]
CONF = cfg.CONF
CONF.register_opts(lxd_opts, 'lxd')
CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
class LXDDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": False,
"supports_recreate": False,
"supports_migrate_to_same_host": False
}
"""LXD hypervisor driver."""
def __init__(self, virtapi):
self.virtapi = virtapi
def __init__(self, virtapi, read_only=False):
super(LXDDriver, self).__init__(virtapi)
self.lxd = api.API()
self.client = client.Client()
self.container = container.Container(self.client,
virtapi)
self.container = container.Container(self.lxd, self.virtapi)
self.migration = migration.Migration()
self.host = host.Host(self.lxd)
def init_host(self, host):
    """Delegate host initialization to the container manager."""
    return self.container.init_host()
def list_instances(self):
    """Return container names known to LXD (containers are named by uuid)."""
    return self.client.container_list()

def list_instance_uuids(self):
    # Containers are named by instance uuid, so this equals
    # list_instances().
    return self.client.container_list()
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None,
          flavor=None):
    """Create and boot a new container for *instance*.

    NOTE(review): Container.container_start's parameter list differs
    between versions in this codebase — verify the trailing 'flavor'
    argument is accepted.
    """
    self.container.container_start(context, instance, image_meta,
                                   injected_files, admin_password, network_info,
                                   block_device_info, flavor)
def snapshot(self, context, instance, name, update_task_state):
    """Snapshot is not supported by this driver."""
    # Fixed throughout this block: 'raise NotImplemented()' raises
    # TypeError because NotImplemented is not callable;
    # NotImplementedError is the intended exception.
    raise NotImplementedError()

def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Reboot the container backing *instance*."""
    self.container.container_restart(
        context, instance, network_info, reboot_type,
        block_device_info, bad_volumes_callback)

def rescue(self, context, instance, network_info, image_meta,
           rescue_password):
    """Rescue is not supported."""
    raise NotImplementedError()

def unrescue(self, instance, network_info):
    """Unrescue is not supported."""
    raise NotImplementedError()

def poll_rebooting_instances(self, timeout, instances):
    """Not supported."""
    raise NotImplementedError()

def migrate_disk_and_power_off(self, context, instance, dest,
                               flavor, network_info,
                               block_device_info=None,
                               timeout=0, retry_interval=0):
    """Resize/migration is not supported."""
    raise NotImplementedError()

def finish_revert_migration(self, context, instance, network_info,
                            block_device_info=None, power_on=True):
    """Resize/migration is not supported."""
    raise NotImplementedError()

def post_live_migration_at_destination(self, context, instance,
                                       network_info,
                                       block_migration=False,
                                       block_device_info=None):
    """Live migration is not supported."""
    raise NotImplementedError()
def power_off(self, instance, shutdown_timeout=0, shutdown_attempts=0):
self.container.container_power_off(instance)
def power_on(self, context, instance, network_info, block_device_info):
self.container.container_power_on(
instance, network_info, block_device_info)
def soft_delete(self, instance):
pass
def restore(self, instance):
raise NotImplemented()
def pause(self, instance):
pass
def unpause(self, instance):
pass
def suspend(self, instance):
return self.container.container_suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
return self.container.container_resume(context, instance, network_info, block_device_info)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
return self.container.container_destroy(
context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
self.container.teardown_network(instance, network_info)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach the disk to the instance at mountpoint using info."""
raise NotImplemented()
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach the disk attached to the instance."""
raise NotImplemented()
def swap_volume(self, old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
"""Replace the disk attached to the instance."""
raise NotImplemented()
def attach_interface(self, instance, image_meta, vif):
raise NotImplemented()
def detach_interface(self, instance, vif):
raise NotImplemented()
try:
self.lxd.host_ping()
except Exception as ex:
LOG.exception(_LE('Unable to connect to LXD daemon: %s') % ex)
raise
def get_info(self, instance):
istate = self.container.container_info(instance)
istate = self.container.container_state(instance)
return hardware.InstanceInfo(state=istate,
max_mem_kb=0,
mem_kb=0,
num_cpu=1,
cpu_time_ns=0)
def instance_exists(self, instance):
try:
return instance.uuid in self.list_instance_uuids()
except NotImplementedError:
return instance.name in self.list_instances()
def list_instances(self):
return self.lxd.container_list()
def list_instance_uuids(self):
return self.lxd.container_list()
def plug_vifs(self, instance, network_info):
for vif in network_info:
self.container.plug_vifs(instance, network_info)
def unplug_vifs(self, instance, network_info, ignore_errors):
try:
for vif in network_info:
self.container.unplug_vifs(instance, network_info)
except exception.Exception:
if not ignore_errors:
raise
def rebuild(self, context, instance, image_meta, injected_files,
admin_password, bdms, detach_block_devices,
attach_block_devices, network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
return self.container.container_rebuild(context, instance, image_meta,
injected_files, admin_password, bdms, detach_block_devices,
attach_block_devices, network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
return self.container.container_start(context, instance, image_meta,
injected_files, admin_password,
network_info, block_device_info)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
return self.container.container_destroy(context, instance,
network_info,
block_device_info,
destroy_disks,
migrate_data)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
return self.container.container_cleanup(context, instance,
network_info, block_device_info,
destroy_disks, migrate_data,
destroy_vifs)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
return self.container.container_reboot(context, instance,
network_info,
reboot_type, block_device_info,
bad_volumes_callback)
def get_console_pool_info(self, console_type):
raise NotImplementedError()
def get_console_output(self, context, instance):
return self.container.get_console_log(instance)
return self.container.get_console_output(context, instance)
def refresh_security_group_rules(self, security_group_id):
def get_vnc_console(self, context, instance):
raise NotImplementedError()
def get_spice_console(self, context, instance):
raise NotImplementedError()
def get_rdp_console(self, context, instance):
raise NotImplementedError()
def get_serial_console(self, context, instance):
raise NotImplementedError()
def get_diagnostics(self, instance):
raise NotImplementedError()
def get_instance_diagnostics(self, instance):
raise NotImplementedError()
def get_all_bw_counters(self, instances):
raise NotImplementedError()
def get_all_volume_usage(self, context, compute_host_bdms):
raise NotImplementedError()
def get_host_ip_addr(self):
return self.host.get_host_ip_addr()
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
return self.volume.container_attach(context, connection_info,
instance, mountpoint,
disk_bus, device_type,
encryption)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
return self.volume.container_detach_volume(connection_info, instance,
mountpoint, encryption)
def attach_interface(self, instance, image_meta, vif):
return self.container.container_attach_interface(instance, image_meta,
vif)
def detach_interface(self, instance, vif):
return self.container.containre_detach_interface(instance, vif)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
raise NotImplementedError()
def snapshot(self, context, instance, image_id, update_task_state):
return self.container.snapshot(context, instance, image_id,
update_task_state)
def post_interrupted_snapshot_cleanup(self, context, instance):
pass
def refresh_security_group_members(self, security_group_id):
pass
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
raise NotImplementedError()
def refresh_instance_security_rules(self, instance):
pass
def confirm_migration(self, migration, instance, network_info):
raise NotImplementedError()
def refresh_provider_fw_rules(self):
pass
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
raise NotImplementedError()
def pause(self, instance):
return self.container.container_pause(instance)
def unpause(self, instance):
return self.container.container_unpause(instance)
def suspend(self, context, instance):
return self.container.container_suspend(context, instance)
def resume(self, context, instance, network_info, block_device_info=None):
return self.container.container_resume(context, instance,
network_info,
block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
return self.container.container_rescue(context, instance,
network_info, image_meta,
rescue_password)
def unrescue(self, instance, network_info):
return self.container.container_unrescue(instance, network_info)
def power_off(self, instance, timeout=0, retry_interval=0):
return self.container.container_power_off(instance, timeout,
retry_interval)
def power_on(self, context, instance, network_info,
block_device_info=None):
return self.container.container_power_on(context, instance,
network_info,
block_device_info)
def soft_delete(self, instance):
return self.container.container_soft_deelte(instance)
def restore(self, instance):
return self.container.container_restore(instance)
def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
return self.host.get_available_resource(nodename)
Since we don't have a real hypervisor, pretend we have lots of
disk and ram.
"""
data = {}
disk = host_utils.get_fs_info(CONF.lxd.lxd_root_dir)
memory = host_utils.get_memory_mb_usage()
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
raise NotImplementedError()
data["supported_instances"] = jsonutils.dumps([
('i686', 'lxd', 'lxd'),
('x86_64', 'lxd', 'lxd')])
data["vcpus"] = multiprocessing.cpu_count()
data["memory_mb"] = memory['total'] / units.Mi
data["local_gb"] = disk['total'] / units.Gi
data["vcpus_used"] = 1
data["memory_mb_used"] = memory['used'] / units.Mi
data["local_gb_used"] = disk['used'] / units.Gi
data["hypervisor_type"] = "lxd"
data["hypervisor_version"] = "0"
data["hypervisor_hostname"] = nodename
data["cpu_info"] = "?"
data["disk_available_least"] = disk['available'] / units.Gi
data['numa_topology'] = None
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
raise NotImplementedError()
return data
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
raise NotImplementedError()
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
pass
def post_live_migration_at_source(self, context, instance, network_info):
raise NotImplementedError(_("Hypervisor driver does not support "
"post_live_migration_at_source method"))
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
raise NotImplementedError()
def check_instance_shared_storage_local(self, context, instance):
raise NotImplementedError()
def check_instance_shared_storage_remote(self, context, data):
raise NotImplementedError()
def check_instance_shared_storage_cleanup(self, context, data):
pass
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
raise NotImplementedError()
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
raise NotImplementedError()
def check_can_live_migrate_source(self, context, instance,
dest_check_data,
block_device_info=None):
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
raise NotImplementedError()
def refresh_provider_fw_rules(self):
raise NotImplementedError()
def refresh_instance_security_rules(self, instance):
raise NotImplementedError()
def ensure_filtering_rules_for_instance(self, instance, network_info):
raise NotImplementedError()
def filter_defer_apply_on(self):
pass
def filter_defer_apply_off(self):
pass
def unfilter_instance(self, instance, network_info):
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
raise NotImplementedError()
def inject_network_info(self, instance, nw_info):
pass
def poll_rebooting_instances(self, timeout, instances):
raise NotImplementedError()
def host_power_action(self, action):
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
raise NotImplementedError()
def set_host_enabled(self, enabled):
raise NotImplementedError()
def get_host_uptime(self):
return self.host.get_host_uptime()
def get_host_cpu_stats(self):
return self.host.get_host_cpu_stats()
def block_stats(self, instance, disk_id):
return [0, 0, 0, 0, None] # zulcss - fixme
def deallocate_networks_on_reschedule(self, instance):
return False
def manage_image_cache(self, context, all_instances):
pass
def get_volume_connector(self, instance):
raise NotImplementedError()
def get_available_nodes(self, refresh=False):
hostname = socket.gethostname()
return [hostname]
def node_is_available(self, nodename):
if nodename in self.get_available_nodes():
return True
# Refresh and check again.
return nodename in self.get_available_nodes(refresh=True)
def get_per_instance_usage(self):
return {}
def instance_on_disk(self, instance):
return False
def register_event_listener(self, callback):
self._compute_event_callback = callback
def emit_event(self, event):
if not self._compute_event_callback:
LOG.debug("Discarding event %s", str(event))
return
if not isinstance(event, virtevent.Event):
raise ValueError(
_("Event must be an instance of nova.virt.event.Event"))
try:
LOG.debug("Emitting event %s", str(event))
self._compute_event_callback(event)
except Exception as ex:
LOG.error(_LE("Exception dispatching event %(event)s: %(ex)s"),
{'event': event, 'ex': ex})
def delete_instance_files(self, instance):
return True
@property
def need_legacy_block_device_info(self):
return True
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
raise NotImplementedError()
def volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info):
raise NotImplementedError()
def quiesce(self, context, instance, image_meta):
return self.container.container_quiesce(context, instance, image_meta)
def unquiesce(self, context, instance, image_meta):
return self.container.container_unquiesce(context, instance,
image_meta)

158
nclxd/nova/virt/lxd/host.py Normal file
View File

@ -0,0 +1,158 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import platform
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import units
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova.i18n import _LW
from nova import utils
from cpuinfo import cpuinfo
import psutil
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Host(object):
    """Report LXD host resources (CPU, memory, disk) to Nova.

    Gathers static CPU details once at construction time and computes
    live memory/disk usage on demand from /proc/meminfo and a statvfs
    of the configured LXD root directory.
    """

    def __init__(self, lxd):
        # LXD API client; stored for callers, not used by the local
        # resource helpers below.
        self.lxd = lxd
        # Static CPU details (brand, vendor, flags, advertised clock).
        self.host_cpu_info = cpuinfo.get_cpu_info()

    def get_available_resource(self, nodename):
        """Return the resource dict consumed by Nova's resource tracker.

        :param nodename: unused here; hostname is taken from platform.node()
        :returns: dict with vcpus, memory (MB), disk (GB), hypervisor
                  identity and supported instance tuples
        """
        local_cpu_info = self._get_cpu_info()
        cpu_topology = local_cpu_info['topology']
        vcpus = (cpu_topology['cores'] *
                 cpu_topology['sockets'] *
                 cpu_topology['threads'])

        local_memory_info = self._get_memory_mb_usage()
        local_disk_info = self._get_fs_info(CONF.lxd.lxd_root_dir)

        data = {
            'vcpus': vcpus,
            'memory_mb': local_memory_info['total'] / units.Mi,
            'memory_mb_used': local_memory_info['used'] / units.Mi,
            'local_gb': local_disk_info['total'] / units.Gi,
            'local_gb_used': local_disk_info['used'] / units.Gi,
            'vcpus_used': 0,
            'hypervisor_type': 'lxd',
            'hypervisor_version': 1,
            'hypervisor_hostname': platform.node(),
            'supported_instances': jsonutils.dumps(
                [(arch.I686, hv_type.LXC, vm_mode.EXE),
                 (arch.X86_64, hv_type.LXC, vm_mode.EXE)]),
            'numa_topology': None,
        }
        return data

    def get_host_ip_addr(self):
        """Return CONF.my_ip, warning if it is not bound to any interface."""
        ips = compute_utils.get_machine_ips()
        if CONF.my_ip not in ips:
            LOG.warn(_LW('my_ip address (%(my_ip)s) was not found on '
                         'any of the interfaces: %(ifaces)s'),
                     {'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
        return CONF.my_ip

    def get_host_uptime(self):
        """Return raw 'uptime' output (LANG=C for locale-stable parsing)."""
        out, err = utils.execute('env', 'LANG=C', 'uptime')
        return out

    def _get_fs_info(self, path):
        """get free/used/total space info for a filesystem

        :param path: Any dirent on the filesystem
        :returns: A dict containing
                  :free: How much space is free (in bytes)
                  :used: How much space is used (in bytes)
                  :total: How big the filesytem is (in bytes)
        """
        hddinfo = os.statvfs(path)
        total = hddinfo.f_blocks * hddinfo.f_bsize
        available = hddinfo.f_bavail * hddinfo.f_bsize
        used = total - available
        return {'total': total,
                'available': available,
                'used': used}

    def _get_memory_mb_usage(self):
        """Get the used memory size(MB) of the host.

        :returns: dict with 'total' and 'used' in bytes (Linux-only;
                  parses /proc/meminfo, counting Buffers/Cached as free)
        """
        with open('/proc/meminfo') as fp:
            m = fp.read().split()
            idx1 = m.index('MemTotal:')
            idx2 = m.index('MemFree:')
            idx3 = m.index('Buffers:')
            idx4 = m.index('Cached:')

            total = int(m[idx1 + 1])
            avail = int(m[idx2 + 1]) + int(m[idx3 + 1]) + int(m[idx4 + 1])

        return {
            'total': total * 1024,
            'used': (total - avail) * 1024
        }

    def _get_cpu_info(self):
        """Assemble arch/model/vendor/topology/features for this host."""
        cpu_info = dict()
        cpu_info['arch'] = platform.uname()[5]
        cpu_info['model'] = self.host_cpu_info['brand']
        cpu_info['vendor'] = self.host_cpu_info['vendor_id']

        topology = dict()
        topology['sockets'] = self._get_cpu_sockets()
        topology['cores'] = self._get_cpu_cores()
        topology['threads'] = 1  # fixme
        cpu_info['topology'] = topology

        cpu_info['features'] = self.host_cpu_info['flags']
        return cpu_info

    def _get_cpu_cores(self):
        """Logical CPU count, with a psutil<2.0 fallback."""
        try:
            return psutil.cpu_count()
        except Exception:
            return psutil.NUM_CPUS

    def _get_cpu_sockets(self):
        """Physical core count, with a psutil<2.0 fallback.

        Bug fix: psutil's keyword is lowercase 'logical'; the original
        'Logical=False' raised TypeError on every call, so the fallback
        branch was always taken.
        """
        try:
            return psutil.cpu_count(logical=False)
        except Exception:
            return psutil.NUM_CPUS

    def get_host_cpu_stats(self):
        """Return cumulative CPU times (seconds, truncated) and frequency.

        NOTE: uses the Python-2-only 'long' builtin, consistent with the
        rest of this py2-era codebase.
        """
        return {
            'kernel': long(psutil.cpu_times()[2]),
            'idle': long(psutil.cpu_times()[3]),
            'user': long(psutil.cpu_times()[0]),
            'iowait': long(psutil.cpu_times()[4]),
            'frequency': self.host_cpu_info['hz_advertised']
        }

View File

@ -0,0 +1,142 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from nova.i18n import _, _LE
from nova import exception
from nova.openstack.common import fileutils
from nova import utils
import container_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def load_driver(default, *args, **kwargs):
    # Instantiate the configured image backend class.
    # NOTE(review): 'default' is accepted but never used -- the class is
    # always loaded from CONF.lxd.lxd_image_type; confirm whether falling
    # back to 'default' when the option is unset was intended.
    image_class = importutils.import_class(CONF.lxd.lxd_image_type)
    return image_class(*args, **kwargs)
def fetch_image(client, context, image, instance):
    """Download the instance's glance image into the local base dir.

    No-op when *image* is already present in the LXD image store.
    Re-raises any download error after logging it.

    :param client: LXD API client (used only to check image_list)
    :param context: nova request context passed to the image fetcher
    :param image: glance image identifier to look up in LXD
    :param instance: nova instance whose image_ref is downloaded
    """
    try:
        if image not in client.image_list():
            if not os.path.exists(container_utils.get_base_dir()):
                fileutils.ensure_tree(container_utils.get_base_dir())
            container_image = container_utils.get_container_image(
                instance)
            container_utils.fetch_image(context, container_image, instance)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Bug fix: '%(instance)' lacked the trailing 's' conversion,
            # which makes %-formatting raise ValueError at log time.
            LOG.error(_LE('Error downloading image: %(instance)s'
                          ' %(image)s'),
                      {'instance': instance.uuid,
                       'image': instance.image_ref})
class BaseContainerImage(object):
    """Interface for container image backends.

    Subclasses implement setup/teardown of the image artifacts an
    instance needs; the base implementations are no-ops.
    """

    def __init__(self, lxd):
        # LXD API client available to subclasses.
        self.lxd = lxd

    def setup_container(self, context, instance, image_meta):
        """Prepare the instance's image; no-op in the base class."""
        pass

    def destroy_container(self, instance, image_meta):
        """Clean up the instance's image artifacts; no-op in the base class."""
        pass

    # Backwards-compatible alias: the method was originally published
    # under this misspelled name ('destory_contianer').
    destory_contianer = destroy_container
class DefaultContainerImage(object):
    """Default image backend.

    Fetches a glance image to the local base dir, verifies its md5
    against the glance checksum, uploads it to the LXD image store and
    creates an alias named after the glance image ref.
    """

    def __init__(self, lxd):
        # LXD API client used for image list/upload/alias operations.
        self.lxd = lxd

    def setup_container(self, context, instance, image_meta):
        """Ensure the instance's image is present in the LXD image store.

        No-op if the image (or its local tarball) already exists.  On
        failure the partially-created image state is torn down and the
        original exception is re-raised.
        """
        LOG.debug("Setting up Container")
        container_image = container_utils.get_container_image(instance)
        try:
            if instance.image_ref in self.lxd.image_list():
                return
            if os.path.exists(container_image):
                return
            fetch_image(self.lxd, context,
                        instance.image_ref, instance)
            self._upload_image(container_image, instance, image_meta)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                # Bug fixes: pass the format arguments individually
                # instead of as one tuple, and call the correctly spelled
                # destroy_container (was 'destroy_contianer', which raised
                # AttributeError in this error path).  The original bare
                # 'raise' is dropped: save_and_reraise_exception already
                # re-raises on exit.
                LOG.exception(_LE('Failed to setup container: %s = %s'),
                              instance.uuid, ex)
                self.destroy_container(instance, image_meta)

    def _upload_image(self, container_image, instance, image_meta):
        """Verify the downloaded tarball and publish it to LXD."""
        if not self._check_image_file(container_image, image_meta):
            msg = _('md5checksum mismtach')
            raise exception.NovaException(msg)

        if not self.lxd.image_upload(container_image,
                                     container_image.split('/')[-1]):
            msg = _('Image upload failed')
            raise exception.NovaException(msg)

        # The alias maps the glance image ref to the LXD image fingerprint.
        config = {'target': self._get_lxd_md5sum(container_image),
                  'name': instance.image_ref}
        if not self.lxd.alias_create(config):
            msg = _('Alias creation failed')
            raise exception.NovaException(msg)

    def _check_image_file(self, container_image, image_meta):
        """Return True if the file's md5 matches the glance checksum."""
        md5sum = self._get_glance_md5sum(container_image)
        return image_meta.get('checksum') == md5sum

    def _get_glance_md5sum(self, container_image):
        """Compute the downloaded file's md5 (glance checksums are md5)."""
        out, err = utils.execute('md5sum', container_image)
        return out.split(' ')[0]

    def _get_lxd_md5sum(self, container_image):
        """Return the file's sha256 hex digest.

        NOTE(review): despite the 'md5sum' name this computes sha256,
        which is what LXD uses for image fingerprints; the name is kept
        for interface compatibility.
        """
        with open(container_image, 'rb') as fd:
            return hashlib.sha256(fd.read()).hexdigest()

    def _image_rollback(self, container_image):
        """Remove the locally downloaded image tarball, if present."""
        if os.path.exists(container_image):
            os.unlink(container_image)

    def destroy_container(self, instance, image_meta):
        """Remove the instance's image alias, LXD image and local file."""
        LOG.debug('Destroying container')
        container_image = container_utils.get_container_image(instance)
        if instance.image_ref in self.lxd.alias_list():
            self.lxd.alias_delete(instance.image_ref)
        fingerprint = self._get_lxd_md5sum(container_image)
        if fingerprint in self.lxd.image_list():
            self.lxd.image_delete(fingerprint)
        if os.path.exists(container_image):
            os.unlink(container_image)

View File

@ -0,0 +1,92 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.i18n import _
class Migration(object):
    """Placeholder migration driver for LXD.

    Every migration operation is currently unimplemented.  The methods
    exist so that the compute driver can delegate the full Nova
    virt-driver migration API surface here and gain real
    implementations incrementally.
    """

    def __init__(self):
        pass

    def migrate_disk_and_power_off(self, context, instance, dest, flavor,
                                   network_info, block_device_info=None,
                                   timeout=0, retry_interval=0):
        """Cold migration, phase 1 (source host): not supported."""
        raise NotImplementedError()

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Cold migration, phase 2 (destination host): not supported."""
        raise NotImplementedError()

    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM.

        :param instance: nova.objects.instance.Instance
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()

    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Revert a resize back on the source host: not supported."""
        raise NotImplementedError()

    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data=None):
        """Destination-side preparation for live migration: not supported."""
        raise NotImplementedError()

    def live_migration(self, context, instance, dest, post_method,
                       recover_method, block_migration=False,
                       migrate_data=None):
        """Live-migrate a container to *dest*: not supported."""
        raise NotImplementedError()

    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        """Clean up the destination after a failed live migration."""
        raise NotImplementedError()

    def post_live_migration(self, context, instance, block_device_info,
                            migrate_data=None):
        """Source-side cleanup after live migration; nothing to do."""
        pass

    def post_live_migration_at_source(self, context, instance, network_info):
        """Unplug source networking after live migration: not supported."""
        raise NotImplementedError(_("Hypervisor driver does not support "
                                    "post_live_migration_at_source method"))

    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        """Destination-side finalization of live migration: not supported."""
        raise NotImplementedError()

    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info,
                                           dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Validate the destination for live migration: not supported."""
        raise NotImplementedError()

    def check_can_live_migrate_destination_cleanup(self, context,
                                                   dest_check_data):
        """Discard destination check results: not supported."""
        raise NotImplementedError()

    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data,
                                      block_device_info=None):
        """Validate the source for live migration: not supported."""
        raise NotImplementedError()

View File

@ -0,0 +1,85 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from nova import exception
from nova.i18n import _
import container_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class LXDProfile(object):
    """Create and delete the per-instance LXD profile.

    A profile named after the instance uuid carries the container's
    console logfile setting (via raw.lxc passthrough), resource limits
    from the flavor, and network devices.
    """

    def __init__(self, lxd):
        # LXD API client used for profile CRUD and host queries.
        self.lxd = lxd

        ''' Prefetch information that we need about the host.'''
        self.host = self.lxd.host_info()

    def profile_create(self, instance, network_info):
        """Create the LXD profile for *instance*.

        :raises: exception.NovaException if the LXD API reports failure.
        """
        LOG.debug('Creating host profile')
        # raw.lxc passthrough points the container console at a
        # per-instance logfile so nova can serve console output.
        profile = {'name': instance.uuid,
                   'config': {'raw.lxc':
                              'lxc.console.logfile = %s\n'
                              % container_utils.get_console_path(instance)}
                   }
        if network_info:
            profile['devices'] = self._get_network_devices(network_info)

        if instance:
            profile = self._get_container_limits(instance, profile)

        if not self.lxd.profile_create(profile):
            msg = _('Failed to create profile')
            raise exception.NovaException(msg)

    def profile_delete(self, instance):
        """Delete the instance's profile.

        :raises: exception.NovaException if the LXD API reports failure.
        """
        if not self.lxd.profile_delete(instance.uuid):
            msg = _('Unable to delete profile')
            raise exception.NovaException(msg)

    def _get_container_limits(self, instance, profile):
        """Add cpu/memory limits from the flavor to *profile*; return it."""
        LOG.debug("Setting container limits")
        if instance.vcpus >= 1:
            # NOTE(review): LXD documents this config key as 'limits.cpu';
            # confirm 'limits.cpus' is actually accepted -- TODO verify.
            profile['config'].update({'limits.cpus': '%s'
                                      % instance.vcpus})
        if instance.memory_mb >= 0:
            profile['config'].update({'limits.memory': instance.memory_mb})
        return profile

    def _get_network_devices(self, network_info):
        # NOTE(review): only the values from the *last* vif in
        # network_info survive the loop, and the device name is
        # hard-coded to 'eth0' -- this assumes a single NIC per
        # instance; confirm multi-NIC handling.
        for vif in network_info:
            vif_id = vif['id'][:11]
            vif_type = vif['type']
            bridge = vif['network']['bridge']
            mac = vif['address']

            # ovs ports attach via a qbr linux bridge named after the vif.
            if vif_type == 'ovs':
                bridge = 'qbr%s' % vif_id

        return {'eth0': {'nictype': 'bridged',
                         'hwaddr': mac,
                         'parent': bridge,
                         'type': 'nic'}}

View File

@ -12,20 +12,16 @@
# License for the specific language governing permissions and limitations
# under the License.
import getpass
import os
from oslo.config import cfg
from oslo_log import log as logging
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _LW, _
from nova.i18n import _LE, _
from nova import exception
from nova import utils
from nova.network import linux_net
from nova.network import model as network_model
from nova import utils
CONF = cfg.CONF
@ -104,7 +100,7 @@ class LXDOpenVswitchDriver(object):
utils.execute('ip', 'link', 'set', v2_name, 'down',
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"),
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def _get_bridge_name(self, vif):

View File

@ -0,0 +1,32 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Justin Santa Barbara
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Volume(object):
    """Stub volume operations for LXD containers.

    Cinder volume attach/detach is not implemented for LXD yet; both
    operations raise NotImplementedError.
    """

    def __init__(self):
        # Bug fix: the sole parameter was named 'object' instead of
        # 'self', shadowing the builtin and making the signature
        # misleading; the constructor intentionally does nothing.
        pass

    def container_attach_volume(self, context, connection_info, instance,
                                mountpoint, disk_bus=None, device_type=None,
                                encryption=None):
        """Attach a volume to a container: not supported."""
        raise NotImplementedError()

    def container_detach_volume(self, connection_info, instance, mountpoint,
                                encryption=None):
        """Detach a volume from a container: not supported."""
        raise NotImplementedError()

View File

@ -1,115 +0,0 @@
import json
import mock
import requests
from nova import test
from nclxd.nova.virt.lxd import client
class LXDFakeResponse(object):
"""Fake response to LXD API."""
def __init__(self, code=None, text=None):
self.status_code = code
self.text = text
def json(self):
return json.loads(self.text)
def raise_for_status(self):
if self.status_code > 300:
raise requests.exception.HTTPError
class LXDTestClient(test.TestCase):
    """Unit tests for the LXD REST API client wrapper."""

    def setUp(self):
        super(LXDTestClient, self).setUp()
        self.client = client.Client('https://127.0.0.1:8443',
                                    'client',
                                    'key')

    def _fake_state_response(self):
        # Common fake payload shared by the state-change tests below: a
        # successful synchronous call reporting a RUNNING container.
        return_text = json.dumps({"type": "sync", "result": "success",
                                  "metadata": {"state": "RUNNING",
                                               "state_code": 3}})
        return LXDFakeResponse(code=200, text=return_text)

    # NOTE(review): these tests replace requests.get/requests.put
    # module-wide instead of using mock.patch, so the mocks leak between
    # tests; kept as-is to preserve the suite's established pattern.

    def test_client_defined(self):
        requests.get = mock.Mock(return_value=True)
        instance = self.client.defined('test')
        self.assertTrue(instance)

    def test_client_state(self):
        requests.get = mock.Mock(return_value=self._fake_state_response())
        instance = self.client.state('test')
        self.assertIn('RUNNING', instance)

    def test_client_running(self):
        requests.get = mock.Mock(return_value=self._fake_state_response())
        instance = self.client.running('test')
        self.assertTrue(instance)

    def test_client_list(self):
        return_text = json.dumps(
            {"type": "sync", "result": "success",
             "metadata": ["dc5a4fd8-a43e-486f-9e2c-fb07917f2915"]})
        get_return = LXDFakeResponse(code=200, text=return_text)
        requests.get = mock.Mock(return_value=get_return)
        instance = self.client.list()
        self.assertIsInstance(instance, list)
        self.assertIn('dc5a4fd8-a43e-486f-9e2c-fb07917f2915', instance)

    def test_client_start(self):
        requests.put = mock.Mock(return_value=self._fake_state_response())
        instance = self.client.start('test')
        self.assertTrue(instance)

    def test_client_stop(self):
        # Bug fix: this test previously called client.start(), leaving
        # client.stop() completely untested.
        requests.put = mock.Mock(return_value=self._fake_state_response())
        instance = self.client.stop('test')
        self.assertTrue(instance)

    def test_client_reboot(self):
        # Bug fix: test_client_reboot was defined twice; the identical
        # duplicate silently shadowed this one and has been removed.
        requests.put = mock.Mock(return_value=self._fake_state_response())
        instance = self.client.reboot('test')
        self.assertTrue(instance)

    def test_client_pause(self):
        requests.put = mock.Mock(return_value=self._fake_state_response())
        instance = self.client.pause('test')
        self.assertTrue(instance)

    def test_client_unpause(self):
        requests.put = mock.Mock(return_value=self._fake_state_response())
        instance = self.client.unpause('test')
        self.assertTrue(instance)

View File

@ -1,75 +0,0 @@
import fixtures
from oslo.config import cfg
import mock
from nova import test
from nova.tests.unit import utils
from nova.tests.unit.image import fake as fake_image
from nclxd.nova.virt.lxd import driver
from nclxd.nova.virt.lxd import client
from nclxd.nova.virt.lxd import container
CONF = cfg.CONF
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
class LXDTestDriver(test.TestCase):
    """Unit tests for the LXD nova compute driver entry points."""

    def setUp(self):
        super(LXDTestDriver, self).setUp()
        self.ctxt = utils.get_test_admin_context()
        fake_image.stub_out_image_service(self.stubs)
        # Point the LXD root at a throwaway temp dir so tests never touch
        # the real host filesystem.
        self.flags(lxd_root_dir=self.useFixture(fixtures.TempDir()).path,
                   group='lxd')
        self.driver = driver.LXDDriver(None, None)

    @mock.patch.object(client.Client, 'list')
    def test_list_instances(self, mock_list):
        """list_instances() returns container names as a list."""
        mock_list.return_value = ['container1']
        domains = self.driver.list_instances()
        self.assertIsInstance(domains, list)

    @mock.patch.object(client.Client, 'list')
    def test_list_instances_uuid(self, mock_list):
        """The uuid listing path also yields a list."""
        mock_list.return_value = ['container1']
        domains = self.driver.list_instances()
        self.assertIsInstance(domains, list)

    # Bug fix for the three tests below: @mock.patch.object injects mock
    # objects positionally (bottom-most decorator first), but the old
    # signatures bound those slots to image_info/instance_href/network_info
    # and immediately overwrote them, hiding the injected mocks.  They are
    # now received under explicit mock_* parameters.

    @mock.patch.object(container.Container, '_fetch_image')
    @mock.patch.object(container.Container, '_start_container')
    def test_spawn_container(self, mock_start_container, mock_fetch_image):
        """spawn() completes with image fetch and container start stubbed."""
        instance_href = utils.get_test_instance()
        image_info = utils.get_test_image_info(None, instance_href)
        network_info = utils.get_test_network_info()
        self.driver.spawn(self.ctxt, instance_href, image_info,
                          'fake_files', 'fake_password',
                          network_info=network_info)

    @mock.patch.object(container.Container, '_fetch_image')
    @mock.patch.object(container.Container, '_start_container')
    @mock.patch.object(client.Client, 'destroy')
    def test_destroy_container(self, mock_destroy, mock_start_container,
                               mock_fetch_image):
        """destroy() after spawn() runs without touching a real backend."""
        instance_href = utils.get_test_instance()
        image_info = utils.get_test_image_info(None, instance_href)
        network_info = utils.get_test_network_info()
        self.driver.spawn(self.ctxt, instance_href, image_info,
                          'fake_files', 'fake_password',
                          network_info=network_info)
        self.driver.destroy(self.ctxt, instance_href, network_info,
                            None, True)

    @mock.patch.object(container.Container, '_fetch_image')
    @mock.patch.object(container.Container, '_start_container')
    @mock.patch.object(client.Client, 'reboot')
    def test_reboot_container(self, mock_reboot, mock_start_container,
                              mock_fetch_image):
        """reboot() after spawn() issues a soft reboot."""
        reboot_type = "SOFT"
        instance_href = utils.get_test_instance()
        image_info = utils.get_test_image_info(None, instance_href)
        network_info = utils.get_test_network_info()
        self.driver.spawn(self.ctxt, instance_href, image_info,
                          'fake_files', 'fake_password',
                          network_info=network_info)
        self.driver.reboot(self.ctxt, instance_href, network_info,
                           reboot_type)

87
nclxd/tests/test_host.py Normal file
View File

@ -0,0 +1,87 @@
import contextlib
import platform
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import units
from nova import test
from nova.virt import fake
from nclxd.nova.virt.lxd import driver
from nclxd.nova.virt.lxd import host
from nova import utils
CONF = cfg.CONF
class LXDTestHostCase(test.NoDBTestCase):
    """Unit tests for host resource reporting in the LXD driver."""

    def setUp(self):
        super(LXDTestHostCase, self).setUp()
        self.connection = driver.LXDDriver(fake.FakeVirtAPI())

    def test_get_available_resource(self):
        """get_available_resource() aggregates cpu/memory/disk stats."""
        memory = {
            'total': 4 * units.Mi,
            'used': 1 * units.Mi
        }
        disk = {
            'total': 10 * units.Gi,
            'available': 3 * units.Gi,
            'used': 1 * units.Gi
        }
        cpu_info = {
            'arch': 'x86_64',
            'model': 'Intel(R) Pentium(R) CPU J2900 @ 2.41GHz',
            'vendor': 'GenuineIntel',
            'sockets': 1,
            'cores': 4,
            'threads': 1,
            'topology': {'sockets': 1,
                         'cores': 4,
                         'threads': 1
                         },
            # Bug fix: the implicit string concatenation below was missing
            # separator spaces at three line boundaries, fusing unrelated
            # flags into 'cmovpat', 'cx16xtpr' and
            # 'tsc_deadline_timerrdrand'.
            'features': 'fpu vme de pse tsc msr pae mce cx8 apic sep mtrr '
                        'pge mca cmov '
                        'pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht '
                        'tm pbe '
                        'syscall nx rdtscp lm constant_tsc arch_perfmon '
                        'pebs bts rep_'
                        'good nopl xtopology nonstop_tsc aperfmperf pni '
                        'pclmul'
                        'qdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 '
                        'xtpr pdcm sse4_1 sse4_2 movbe popcnt '
                        'tsc_deadline_timer '
                        'rdrand lahf_lm 3dnowprefetch ida arat epb dtherm '
                        'tpr_shadow'
                        ' vnmi flexpriority ept vpid tsc_adjust smep erms'
        }
        # NOTE(review): contextlib.nested is Python-2 only; switch to a
        # single mock.patch.multiple / ExitStack when porting to Python 3.
        with contextlib.nested(
            mock.patch.object(host.Host, '_get_fs_info',
                              return_value=disk),
            mock.patch.object(host.Host, '_get_memory_mb_usage',
                              return_value=memory),
            mock.patch.object(host.Host, '_get_cpu_info',
                              return_value=cpu_info)
        ) as (
            _get_fs_info,
            _get_memory_mb_usage,
            _get_cpu_info
        ):
            stats = self.connection.get_available_resource("compute1")
            # assertEquals is a deprecated alias; use assertEqual.
            self.assertEqual(stats['vcpus'], 4)
            self.assertEqual(stats['memory_mb'], 4)
            self.assertEqual(stats['memory_mb_used'], 1)
            self.assertEqual(stats['local_gb'], 10)
            self.assertEqual(stats['local_gb_used'], 1)
            self.assertEqual(stats['vcpus_used'], 0)
            self.assertEqual(stats['hypervisor_type'], 'lxd')
            self.assertEqual(stats['hypervisor_version'], 1)
            self.assertEqual(stats['hypervisor_hostname'], platform.node())

    def test_get_host_ip_addr(self):
        """The driver reports CONF.my_ip as the host address."""
        ip = self.connection.get_host_ip_addr()
        self.assertEqual(ip, CONF.my_ip)

    # TODO(review): re-enable once get_host_uptime is implemented:
    # @mock.patch('nova.utils.execute')
    # def test_get_host_uptime(self, mock_execute):
    #     self.connection.get_host_uptime()
    #     mock_execute.assert_has_calls([
    #         mock.call('env', 'LANG=C', 'uptime')])

71
nclxd/tests/test_utils.py Normal file
View File

@ -0,0 +1,71 @@
# Copyright 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
from oslo_config import cfg
from nova import test
from nova.tests.unit import fake_instance
from nclxd.nova.virt.lxd import container_utils
CONF = cfg.CONF
class LXDUitlsTestCase(test.NoDBTestCase):
    """Tests for the container_utils path helpers and image fetcher."""

    def _fake_instance(self):
        # Every helper under test takes an instance object; build one
        # throwaway fake per test.
        return fake_instance.fake_instance_obj(None, name='fake_inst',
                                               uuid='fake_uuid')

    def test_get_base_dir(self):
        expected = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        self.assertEqual(expected, container_utils.get_base_dir())

    def test_get_container_image(self):
        instance = self._fake_instance()
        expected = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name,
                                '%s.tar.gz' % instance.image_ref)
        self.assertEqual(expected,
                         container_utils.get_container_image(instance))

    def test_get_console_path(self):
        instance = self._fake_instance()
        expected = os.path.join(CONF.lxd.lxd_root_dir,
                                'lxc',
                                instance.uuid,
                                'console.log')
        self.assertEqual(expected,
                         container_utils.get_console_path(instance))

    def test_get_container_dir(self):
        instance = self._fake_instance()
        expected = os.path.join(CONF.lxd.lxd_root_dir,
                                'lxc',
                                instance.uuid)
        self.assertEqual(expected,
                         container_utils.get_container_dir(instance))

    @mock.patch('nova.virt.images.fetch')
    def test_fetch_image(self, mock_images):
        instance = self._fake_instance()
        context = 'opaque context'
        target = '/tmp/targetfile'
        container_utils.fetch_image(context, target, instance)
        mock_images.assert_called_once_with(context, None, target,
                                            instance.user_id,
                                            instance.project_id,
                                            max_size=0)

View File

@ -1,5 +1,48 @@
from nova import test
import contextlib
class LXDTestNetwork(test.TestCase):
    """Placeholder suite for LXD network tests.

    No behaviour is exercised yet beyond the base-class fixture setup.
    """

    def setUp(self):
        super(LXDTestNetwork, self).setUp()
import mock
from oslo_config import cfg
from nova import test
from nova.network import linux_net
from nova.network import model as network_model
from nova.virt.lxd import driver as lxd_driver
from nova import exception
from nova import utils
cfg = cfg.CONF
class LXDVifTestCase(test.NoDBTestCase):
    """Unit tests for LXD VIF plugging.

    Class-level fixtures model a bridged network with one IPv4 and one
    IPv6 subnet attached to bridge 'br0'.
    """

    gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
    dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
    ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
    subnet_bridge_4 = network_model.Subnet(cidr='101.168.1.0/24',
                                           dns=[dns_bridge_4],
                                           gateway=gateway_bridge_4,
                                           routes=None,
                                           dhcp_server='191.168.1.1')

    gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
    subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
                                           dns=None,
                                           gateway=gateway_bridge_6,
                                           ips=None,
                                           routes=None)

    network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
                                           bridge='br0',
                                           label=None,
                                           subnets=[subnet_bridge_4,
                                                    subnet_bridge_6],
                                           bridge_interface='eth0',
                                           vlan=99)

    def setUp(self):
        # Bug fix: super() was called with LXDVifTestCase() -- instantiating
        # the class mid-definition -- instead of the class object itself,
        # which raised before any test could run.
        super(LXDVifTestCase, self).setUp()
        self.executes = []

        def fake_execute(*cmd, **kwargs):
            # Record each command instead of running it on the host.
            self.executes.append(cmd)
            return None, None

        self.stubs.Set(utils, 'execute', fake_execute)

View File

@ -10,4 +10,4 @@ oslo.concurrency>=1.4.1 # Apache-2.0
oslo.utils>=1.2.0 # Apache-2.0
oslo.i18n>=1.3.0 # Apache-2.0
oslo.log
eventlet
-e git://github.com/lxc/pylxd.git#egg=pylxd