compute, virt: support boot-from-volume (ephemeral device and no device not yet supported)

This patch implements basic EBS boot support. Ephemeral-device and
no-device entries in the block device mapping are not handled yet.
Isaku Yamahata
2011-05-27 11:10:03 +09:00
parent 79779eae78
commit 945d566c10
11 changed files with 243 additions and 39 deletions
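For orientation (not part of the commit): each entry of the API-level block device mapping is persisted as one row via block_device_mapping_create(). A minimal sketch of such a row, with made-up values:

    values = {
        'instance_id': 42,
        'device_name': '/dev/vda',       # guest device the volume appears as
        'delete_on_termination': True,   # delete the volume with the instance
        'virtual_name': None,            # would name an ephemeral device (unsupported here)
        'snapshot_id': 7,                # boot volume is created from this snapshot
        'volume_id': None,               # filled in once the volume exists
        'volume_size': 10,               # size in GB when creating from the snapshot
        'no_device': None}               # would suppress a default device (unsupported here)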


@@ -34,6 +34,7 @@ from nova import utils
from nova import volume
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute.utils import terminate_volumes
from nova.scheduler import api as scheduler_api
from nova.db import base
@@ -52,6 +53,18 @@ def generate_default_hostname(instance_id):
return str(instance_id)
def _is_able_to_shutdown(instance, instance_id):
states = {'terminating': "Instance %s is already being terminated",
'migrating': "Instance %s is being migrated",
'stopping': "Instance %s is being stopped"}
msg = states.get(instance['state_description'])
if msg:
LOG.warning(_(msg), instance_id)
return False
return True
class API(base.Base):
"""API for interacting with the compute manager."""
@@ -238,6 +251,22 @@ class API(base.Base):
instance_id,
security_group_id)
# tell vm driver to attach volume at boot time by updating
# BlockDeviceMapping
for bdm in block_device_mapping:
LOG.debug(_('bdm %s'), bdm)
assert 'device_name' in bdm
values = {
'instance_id': instance_id,
'device_name': bdm['device_name'],
'delete_on_termination': bdm.get('delete_on_termination'),
'virtual_name': bdm.get('virtual_name'),
'snapshot_id': bdm.get('snapshot_id'),
'volume_id': bdm.get('volume_id'),
'volume_size': bdm.get('volume_size'),
'no_device': bdm.get('no_device')}
self.db.block_device_mapping_create(elevated, values)
# Set sane defaults if not specified
updates = dict(hostname=self.hostname_factory(instance_id))
if (not hasattr(instance, 'display_name') or
@@ -365,24 +394,22 @@ class API(base.Base):
rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems())
def _get_instance(self, context, instance_id, action_str):
try:
return self.get(context, instance_id)
except exception.NotFound:
LOG.warning(_("Instance %(instance_id)s was not found during "
"%(action_str)s") %
{'instance_id': instance_id, 'action_str': action_str})
raise
@scheduler_api.reroute_compute("delete")
def delete(self, context, instance_id):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate %s"), instance_id)
- try:
- instance = self.get(context, instance_id)
- except exception.NotFound:
- LOG.warning(_("Instance %s was not found during terminate"),
- instance_id)
- raise
+ instance = self._get_instance(context, instance_id, 'terminating')
- if instance['state_description'] == 'terminating':
- LOG.warning(_("Instance %s is already being terminated"),
- instance_id)
- return
- if instance['state_description'] == 'migrating':
- LOG.warning(_("Instance %s is being migrated"), instance_id)
+ if not _is_able_to_shutdown(instance, instance_id):
return
self.update(context,
@@ -396,6 +423,7 @@ class API(base.Base):
self._cast_compute_message('terminate_instance', context,
instance_id, host)
else:
terminate_volumes(self.db, context, instance_id)
self.db.instance_destroy(context, instance_id)
def get(self, context, instance_id):


@@ -54,6 +54,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -215,7 +216,59 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
'block_device_mapping')
block_device_mapping = []
try:
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance_id)
except exception.NotFound:
pass
else:
volume_api = volume.API()
for bdm in bdms:
LOG.debug(_("setting up bdm %s"), bdm)
if ((bdm['snapshot_id'] is not None) and
(bdm['volume_id'] is None)):
# TODO(yamahata): default name and description
vol = volume_api.create(context, bdm['volume_size'],
bdm['snapshot_id'], '', '')
# TODO(yamahata): would creating volumes simultaneously
# reduce creation time?
volume_api.wait_creation(context, vol['id'])
self.db.block_device_mapping_update(
context, bdm['id'], {'volume_id': vol['id']})
bdm['volume_id'] = vol['id']
assert ((bdm['snapshot_id'] is None) or
(bdm['volume_id'] is not None))
if bdm['volume_id'] is not None:
volume_api.check_attach(context,
volume_id=bdm['volume_id'])
dev_path = self._attach_volume_boot(context, instance_id,
bdm['volume_id'],
bdm['device_name'])
block_device_mapping.append({'device_path': dev_path,
'mount_device':
bdm['device_name']})
elif bdm['virtual_name'] is not None:
# TODO(yamahata)
LOG.debug(_('block_device_mapping: '
'ephemeral device is not supported yet'))
else:
# TODO(yamahata)
assert bdm['no_device']
LOG.debug(_('block_device_mapping: '
'no device is not supported yet'))
return block_device_mapping
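# Shape of the list returned to the driver (illustrative values): only
# volume-backed entries are included; ephemeral and no-device entries
# are merely logged for now.
#     [{'device_path': '/dev/nbd1',   # host-side path from setup_compute_volume()
#       'mount_device': '/dev/vdb'}]  # guest-side name from the BDM row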
def run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
@@ -249,11 +302,15 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_manager.setup_compute_network(context,
instance_id)
block_device_mapping = self._setup_block_device_mapping(context,
instance_id)
# TODO(vish) check to make sure the availability zone matches
self._update_state(context, instance_id, power_state.BUILDING)
try:
- self.driver.spawn(instance_ref)
+ self.driver.spawn(instance_ref,
+ block_device_mapping=block_device_mapping)
except Exception as ex: # pylint: disable=W0702
msg = _("Instance '%(instance_id)s' failed to spawn. Is "
"virtualization enabled in the BIOS? Details: "
@@ -786,6 +843,22 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_vnc_console(instance_ref)
def _attach_volume_boot(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instnace at boot time. So actual attach
is done by instance creation"""
# TODO(yamahata):
# should move check_attach to volume manager?
volume.API().check_attach(context, volume_id)
context = context.elevated()
LOG.audit(_("instance %(instance_id)s: booting with "
"volume %(volume_id)s at %(mountpoint)s") %
locals(), context=context)
dev_path = self.volume_manager.setup_compute_volume(context, volume_id)
self.db.volume_attached(context, volume_id, instance_id, mountpoint)
return dev_path
@checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
@@ -803,6 +876,16 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_id,
instance_id,
mountpoint)
values = {
'instance_id': instance_id,
'device_name': mountpoint,
'delete_on_termination': False,
'virtual_name': None,
'snapshot_id': None,
'volume_id': volume_id,
'volume_size': None,
'no_device': None}
self.db.block_device_mapping_create(context, values)
except Exception as exc: # pylint: disable=W0702
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
@@ -817,7 +900,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
- def detach_volume(self, context, instance_id, volume_id):
+ def _detach_volume(self, context, instance_id, volume_id, destroy_bdm):
"""Detach a volume from an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@@ -833,8 +916,15 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_ref['mountpoint'])
self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
if destroy_bdm:
self.db.block_device_mapping_destroy_by_instance_and_volume(
context, instance_id, volume_id)
return True
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
return self._detach_volume(context, instance_id, volume_id, True)
def remove_volume(self, context, volume_id):
"""Remove volume on compute host.

nova/compute/utils.py (new file, +34 lines)

@@ -0,0 +1,34 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 VA Linux Systems Japan K.K.
# Copyright (c) 2011 Isaku Yamahata
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import volume
def terminate_volumes(db, context, instance_id):
"""delete volumes of delete_on_termination=True in block device mapping"""
try:
bdms = db.block_device_mapping_get_all_by_instance(
context, instance_id)
except exception.NotFound:
pass
else:
volume_api = volume.API()
for bdm in bdms:
#LOG.debug(_("terminating bdm %s") % bdm)
if bdm['volume_id'] and bdm['delete_on_termination']:
volume_api.delete(context, bdm['volume_id'])
db.block_device_mapping_destroy(context, bdm['id'])
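Usage sketch: the compute API change above invokes the helper the same way, just before destroying the instance record:

    terminate_volumes(self.db, context, instance_id)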


@@ -61,7 +61,7 @@ class ComputeDriver(object):
"""Return a list of InstanceInfo for all registered VMs"""
raise NotImplementedError()
- def spawn(self, instance, network_info=None):
+ def spawn(self, instance, network_info=None, block_device_mapping=[]):
"""Launch a VM for the specified instance"""
raise NotImplementedError()


@@ -114,7 +114,7 @@ class FakeConnection(driver.ComputeDriver):
info_list.append(self._map_to_instance_info(instance))
return info_list
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=[]):
"""
Create a new instance/VM/domain on the virtualization platform.


@@ -139,7 +139,7 @@ class HyperVConnection(driver.ComputeDriver):
return instance_infos
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=[]):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
if vm is not None:


@@ -67,11 +67,13 @@
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#else
#if not ($getVar('ebs_root', False))
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
#end if
#if $getVar('local', False)
<disk type='file'>
<driver type='${driver_type}'/>
@@ -79,6 +81,13 @@
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#end if
#for $vol in $volumes
<disk type='block'>
<driver type='raw'/>
<source dev='${vol.device_path}'/>
<target dev='${vol.mount_device}' bus='${disk_bus}'/>
</disk>
#end for
#end if
#end if
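For illustration, with ebs_root=True and a single entry in volumes, the template emits a block-device root disk in place of the file-backed one. Assuming the virtio bus with the 'vd' disk prefix and a made-up host device path, the rendered fragment would look like:

    <disk type='block'>
        <driver type='raw'/>
        <source dev='/dev/nbd1'/>
        <target dev='vda' bus='virtio'/>
    </disk>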


@@ -39,6 +39,7 @@ Supports KVM, LXC, QEMU, UML, and XEN.
import multiprocessing
import os
import random
import re
import shutil
import subprocess
import sys
@@ -207,6 +208,8 @@ def _get_network_info(instance):
network_info.append((network, mapping))
return network_info
def _strip_dev(mount_path):
return re.sub(r'^/dev/', '', mount_path)
class LibvirtConnection(driver.ComputeDriver):
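A quick check of the new helper (the behavior follows directly from the re.sub call):

    _strip_dev('/dev/vda')   # -> 'vda'
    _strip_dev('vdb')        # -> 'vdb'; paths without the /dev/ prefix pass through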
@@ -619,11 +622,13 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
@exception.wrap_exception
- def spawn(self, instance, network_info=None):
- xml = self.to_xml(instance, False, network_info)
+ def spawn(self, instance, network_info=None, block_device_mapping=[]):
+ xml = self.to_xml(instance, False, network_info=network_info,
+ block_device_mapping=block_device_mapping)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- self._create_image(instance, xml, network_info=network_info)
+ self._create_image(instance, xml, network_info=network_info,
+ block_device_mapping=block_device_mapping)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
@@ -805,7 +810,7 @@ class LibvirtConnection(driver.ComputeDriver):
# TODO(vish): should we format disk by default?
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
- network_info=None):
+ network_info=None, block_device_mapping=[]):
if not network_info:
network_info = _get_network_info(inst)
@@ -868,16 +873,19 @@ class LibvirtConnection(driver.ComputeDriver):
size = None
root_fname += "_sm"
- self._cache_image(fn=self._fetch_image,
- target=basepath('disk'),
- fname=root_fname,
- cow=FLAGS.use_cow_images,
- image_id=disk_images['image_id'],
- user=user,
- project=project,
- size=size)
+ if not self._volume_in_mapping(self.root_mount_device,
+ block_device_mapping):
+ self._cache_image(fn=self._fetch_image,
+ target=basepath('disk'),
+ fname=root_fname,
+ cow=FLAGS.use_cow_images,
+ image_id=disk_images['image_id'],
+ user=user,
+ project=project,
+ size=size)
- if inst_type['local_gb']:
+ if inst_type['local_gb'] and not self._volume_in_mapping(
+ self.local_mount_device, block_device_mapping):
self._cache_image(fn=self._create_local,
target=basepath('disk.local'),
fname="local_%s" % inst_type['local_gb'],
@@ -992,7 +1000,18 @@ class LibvirtConnection(driver.ComputeDriver):
return result
- def _prepare_xml_info(self, instance, rescue=False, network_info=None):
root_mount_device = 'vda' # FIXME: hard-coded for now
local_mount_device = 'vdb' # FIXME: hard-coded for now
def _volume_in_mapping(self, mount_device, block_device_mapping):
mount_device_ = _strip_dev(mount_device)
for vol in block_device_mapping:
vol_mount_device = _strip_dev(vol['mount_device'])
if vol_mount_device == mount_device_:
return True
return False
+ def _prepare_xml_info(self, instance, rescue=False, network_info=None,
+ block_device_mapping=[]):
# TODO(adiantum) remove network_info creation code
# when multinics will be completed
if not network_info:
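The helpers above combine as follows (illustrative values): a mapping that covers the root device makes the instance an EBS root, and one that covers the local device suppresses the ephemeral disk:

    bdms = [{'mount_device': '/dev/vda', 'device_path': '/dev/nbd1'}]
    self._volume_in_mapping('vda', bdms)   # True  -> ebs_root, skip file-backed root
    self._volume_in_mapping('vdb', bdms)   # False -> local disk handled as before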
@@ -1010,6 +1029,16 @@ class LibvirtConnection(driver.ComputeDriver):
else:
driver_type = 'raw'
for vol in block_device_mapping:
vol['mount_device'] = _strip_dev(vol['mount_device'])
ebs_root = self._volume_in_mapping(self.root_mount_device,
block_device_mapping)
if self._volume_in_mapping(self.local_mount_device,
block_device_mapping):
local_gb = False
else:
local_gb = inst_type['local_gb']
xml_info = {'type': FLAGS.libvirt_type,
'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path,
@@ -1017,9 +1046,11 @@ class LibvirtConnection(driver.ComputeDriver):
'memory_kb': inst_type['memory_mb'] * 1024,
'vcpus': inst_type['vcpus'],
'rescue': rescue,
- 'local': inst_type['local_gb'],
+ 'local': local_gb,
'driver_type': driver_type,
- 'nics': nics}
+ 'nics': nics,
+ 'ebs_root': ebs_root,
+ 'volumes': block_device_mapping}
if FLAGS.vnc_enabled:
if FLAGS.libvirt_type != 'lxc':
@@ -1034,10 +1065,12 @@ class LibvirtConnection(driver.ComputeDriver):
xml_info['disk'] = xml_info['basepath'] + "/disk"
return xml_info
- def to_xml(self, instance, rescue=False, network_info=None):
+ def to_xml(self, instance, rescue=False, network_info=None,
+ block_device_mapping=[]):
# TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- xml_info = self._prepare_xml_info(instance, rescue, network_info)
+ xml_info = self._prepare_xml_info(instance, rescue, network_info,
+ block_device_mapping)
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
LOG.debug(_('instance %s: finished toXML method'), instance['name'])
return xml


@@ -124,7 +124,7 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances."""
return self._vmops.list_instances()
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=[]):
"""Create VM instance."""
self._vmops.spawn(instance)


@@ -194,7 +194,7 @@ class XenAPIConnection(driver.ComputeDriver):
def list_instances_detail(self):
return self._vmops.list_instances_detail()
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=[]):
"""Create VM instance"""
self._vmops.spawn(instance)


@@ -22,6 +22,8 @@ Handles all requests relating to volumes.
import datetime
from eventlet import greenthread
from nova import db
from nova import exception
from nova import flags
@@ -74,6 +76,14 @@ class API(base.Base):
"snapshot_id": snapshot_id}})
return volume
# TODO(yamahata): eliminate dumb polling
def wait_creation(self, context, volume_id):
while True:
volume = self.get(context, volume_id)
if volume['status'] != 'creating':
return
greenthread.sleep(1)
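# Usage sketch, mirroring _setup_block_device_mapping in the compute
# manager above: create a volume from a snapshot, then block until it
# leaves the 'creating' state:
#     vol = volume_api.create(context, size, snapshot_id, '', '')
#     volume_api.wait_creation(context, vol['id'])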
def delete(self, context, volume_id):
volume = self.get(context, volume_id)
if volume['status'] != "available":