compute, virt: support boot-from-volume without ephemeral device and no device

This patch implements basic EBS (boot-from-volume) support.
Ephemeral devices and the no-device mapping are not supported yet.
This commit is contained in:
Isaku Yamahata
2011-05-27 11:10:03 +09:00
parent 79779eae78
commit 945d566c10
11 changed files with 243 additions and 39 deletions

View File

@@ -34,6 +34,7 @@ from nova import utils
from nova import volume from nova import volume
from nova.compute import instance_types from nova.compute import instance_types
from nova.compute import power_state from nova.compute import power_state
from nova.compute.utils import terminate_volumes
from nova.scheduler import api as scheduler_api from nova.scheduler import api as scheduler_api
from nova.db import base from nova.db import base
@@ -52,6 +53,18 @@ def generate_default_hostname(instance_id):
return str(instance_id) return str(instance_id)
def _is_able_to_shutdown(instance, instance_id):
states = {'terminating': "Instance %s is already being terminated",
'migrating': "Instance %s is being migrated",
'stopping': "Instance %s is being stopped"}
msg = states.get(instance['state_description'])
if msg:
LOG.warning(_(msg), instance_id)
return False
return True
class API(base.Base): class API(base.Base):
"""API for interacting with the compute manager.""" """API for interacting with the compute manager."""
@@ -238,6 +251,22 @@ class API(base.Base):
instance_id, instance_id,
security_group_id) security_group_id)
# tell vm driver to attach volume at boot time by updating
# BlockDeviceMapping
for bdm in block_device_mapping:
LOG.debug(_('bdm %s'), bdm)
assert bdm.has_key('device_name')
values = {
'instance_id': instance_id,
'device_name': bdm['device_name'],
'delete_on_termination': bdm.get('delete_on_termination'),
'virtual_name': bdm.get('virtual_name'),
'snapshot_id': bdm.get('snapshot_id'),
'volume_id': bdm.get('volume_id'),
'volume_size': bdm.get('volume_size'),
'no_device': bdm.get('no_device')}
self.db.block_device_mapping_create(elevated, values)
# Set sane defaults if not specified # Set sane defaults if not specified
updates = dict(hostname=self.hostname_factory(instance_id)) updates = dict(hostname=self.hostname_factory(instance_id))
if (not hasattr(instance, 'display_name') or if (not hasattr(instance, 'display_name') or
@@ -365,24 +394,22 @@ class API(base.Base):
rv = self.db.instance_update(context, instance_id, kwargs) rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems()) return dict(rv.iteritems())
def _get_instance(self, context, instance_id, action_str):
try:
return self.get(context, instance_id)
except exception.NotFound:
LOG.warning(_("Instance %(instance_id)s was not found during "
"%(action_str)s") %
{'instance_id': instance_id, 'action_str': action_str})
raise
@scheduler_api.reroute_compute("delete") @scheduler_api.reroute_compute("delete")
def delete(self, context, instance_id): def delete(self, context, instance_id):
"""Terminate an instance.""" """Terminate an instance."""
LOG.debug(_("Going to try to terminate %s"), instance_id) LOG.debug(_("Going to try to terminate %s"), instance_id)
try: instance = self._get_instance(context, instance_id, 'terminating')
instance = self.get(context, instance_id)
except exception.NotFound:
LOG.warning(_("Instance %s was not found during terminate"),
instance_id)
raise
if instance['state_description'] == 'terminating': if not _is_able_to_shutdown(instance, instance_id):
LOG.warning(_("Instance %s is already being terminated"),
instance_id)
return
if instance['state_description'] == 'migrating':
LOG.warning(_("Instance %s is being migrated"), instance_id)
return return
self.update(context, self.update(context,
@@ -396,6 +423,7 @@ class API(base.Base):
self._cast_compute_message('terminate_instance', context, self._cast_compute_message('terminate_instance', context,
instance_id, host) instance_id, host)
else: else:
terminate_volumes(self.db, context, instance_id)
self.db.instance_destroy(context, instance_id) self.db.instance_destroy(context, instance_id)
def get(self, context, instance_id): def get(self, context, instance_id):

View File

@@ -54,6 +54,7 @@ from nova import rpc
from nova import utils from nova import utils
from nova import volume from nova import volume
from nova.compute import power_state from nova.compute import power_state
from nova.compute.utils import terminate_volumes
from nova.virt import driver from nova.virt import driver
@@ -215,7 +216,59 @@ class ComputeManager(manager.SchedulerDependentManager):
""" """
return self.driver.refresh_security_group_members(security_group_id) return self.driver.refresh_security_group_members(security_group_id)
    def _setup_block_device_mapping(self, context, instance_id):
        """Set up the volumes listed in the instance's block device mapping.

        For each mapping entry: create a volume from its snapshot when
        one is referenced but no volume exists yet, wait for creation to
        finish, record the new volume id in the mapping, then attach the
        volume so it is available to the guest at boot.

        Returns a list of {'device_path', 'mount_device'} dicts for the
        virt driver's spawn(). Ephemeral (virtual_name) and no_device
        entries are not supported yet and are only logged.
        """
        self.db.instance_set_state(context,
                                   instance_id,
                                   power_state.NOSTATE,
                                   'block_device_mapping')

        block_device_mapping = []
        try:
            bdms = self.db.block_device_mapping_get_all_by_instance(
                context, instance_id)
        except exception.NotFound:
            # No mapping recorded for this instance; nothing to set up.
            pass
        else:
            volume_api = volume.API()
            for bdm in bdms:
                LOG.debug(_("setting up bdm %s"), bdm)
                if ((bdm['snapshot_id'] is not None) and
                    (bdm['volume_id'] is None)):
                    # TODO(yamahata): default name and description
                    vol = volume_api.create(context, bdm['volume_size'],
                                            bdm['snapshot_id'], '', '')
                    # TODO(yamahata): would creating volumes concurrently
                    # reduce overall creation time?
                    volume_api.wait_creation(context, vol['id'])
                    self.db.block_device_mapping_update(
                        context, bdm['id'], {'volume_id': vol['id']})
                    bdm['volume_id'] = vol['id']

                # Internal-consistency check: a snapshot-backed entry must
                # have a volume by now. NOTE(review): assert is stripped
                # under python -O.
                assert ((bdm['snapshot_id'] is None) or
                        (bdm['volume_id'] is not None))

                if bdm['volume_id'] is not None:
                    volume_api.check_attach(context,
                                            volume_id=bdm['volume_id'])
                    dev_path = self._attach_volume_boot(context, instance_id,
                                                        bdm['volume_id'],
                                                        bdm['device_name'])
                    block_device_mapping.append({'device_path': dev_path,
                                                 'mount_device':
                                                 bdm['device_name']})
                elif bdm['virtual_name'] is not None:
                    # TODO(yamahata)
                    LOG.debug(_('block_device_mapping: '
                                'ephemeral device is not supported yet'))
                else:
                    # TODO(yamahata)
                    assert bdm['no_device']
                    LOG.debug(_('block_device_mapping: '
                                'no device is not supported yet'))
        return block_device_mapping
def run_instance(self, context, instance_id, **kwargs): def run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options.""" """Launch a new instance with specified options."""
context = context.elevated() context = context.elevated()
@@ -249,11 +302,15 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_manager.setup_compute_network(context, self.network_manager.setup_compute_network(context,
instance_id) instance_id)
block_device_mapping = self._setup_block_device_mapping(context,
instance_id)
# TODO(vish) check to make sure the availability zone matches # TODO(vish) check to make sure the availability zone matches
self._update_state(context, instance_id, power_state.BUILDING) self._update_state(context, instance_id, power_state.BUILDING)
try: try:
self.driver.spawn(instance_ref) self.driver.spawn(instance_ref,
block_device_mapping=block_device_mapping)
except Exception as ex: # pylint: disable=W0702 except Exception as ex: # pylint: disable=W0702
msg = _("Instance '%(instance_id)s' failed to spawn. Is " msg = _("Instance '%(instance_id)s' failed to spawn. Is "
"virtualization enabled in the BIOS? Details: " "virtualization enabled in the BIOS? Details: "
@@ -786,6 +843,22 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id) instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_vnc_console(instance_ref) return self.driver.get_vnc_console(instance_ref)
    def _attach_volume_boot(self, context, instance_id, volume_id, mountpoint):
        """Attach a volume to an instance at boot time.

        Only the bookkeeping happens here (attach check, compute-side
        volume setup and the DB attached record); the actual
        guest-visible attach is done by instance creation, i.e. the virt
        driver's spawn. Returns the host device path for the volume.
        """
        # TODO(yamahata):
        # should move check_attach to volume manager?
        volume.API().check_attach(context, volume_id)
        context = context.elevated()
        LOG.audit(_("instance %(instance_id)s: booting with "
                    "volume %(volume_id)s at %(mountpoint)s") %
                  locals(), context=context)
        dev_path = self.volume_manager.setup_compute_volume(context, volume_id)
        self.db.volume_attached(context, volume_id, instance_id, mountpoint)
        return dev_path
@checks_instance_lock @checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint): def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance.""" """Attach a volume to an instance."""
@@ -803,6 +876,16 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_id, volume_id,
instance_id, instance_id,
mountpoint) mountpoint)
values = {
'instance_id': instance_id,
'device_name': mountpoint,
'delete_on_termination': False,
'virtual_name': None,
'snapshot_id': None,
'volume_id': volume_id,
'volume_size': None,
'no_device': None}
self.db.block_device_mapping_create(context, values)
except Exception as exc: # pylint: disable=W0702 except Exception as exc: # pylint: disable=W0702
# NOTE(vish): The inline callback eats the exception info so we # NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same # log the traceback here and reraise the same
@@ -817,7 +900,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception @exception.wrap_exception
@checks_instance_lock @checks_instance_lock
def detach_volume(self, context, instance_id, volume_id): def _detach_volume(self, context, instance_id, volume_id, destroy_bdm):
"""Detach a volume from an instance.""" """Detach a volume from an instance."""
context = context.elevated() context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id) instance_ref = self.db.instance_get(context, instance_id)
@@ -833,8 +916,15 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_ref['mountpoint']) volume_ref['mountpoint'])
self.volume_manager.remove_compute_volume(context, volume_id) self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id) self.db.volume_detached(context, volume_id)
if destroy_bdm:
self.db.block_device_mapping_destroy_by_instance_and_volume(
context, instance_id, volume_id)
return True return True
    def detach_volume(self, context, instance_id, volume_id):
        """Detach a volume from an instance.

        Public entry point: delegates to _detach_volume with
        destroy_bdm=True so the block device mapping row is removed
        along with the detach.
        """
        return self._detach_volume(context, instance_id, volume_id, True)
def remove_volume(self, context, volume_id): def remove_volume(self, context, volume_id):
"""Remove volume on compute host. """Remove volume on compute host.

34
nova/compute/utils.py Normal file
View File

@@ -0,0 +1,34 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 VA Linux Systems Japan K.K
# Copyright (c) 2011 Isaku Yamahata
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import volume
def terminate_volumes(db, context, instance_id):
    """Delete volumes marked delete_on_termination=True in the
    instance's block device mapping, then destroy every mapping entry.

    Best-effort: an instance with no block device mapping is ignored.
    """
    try:
        bdms = db.block_device_mapping_get_all_by_instance(
            context, instance_id)
    except exception.NotFound:
        # No mapping for this instance; nothing to clean up.
        pass
    else:
        volume_api = volume.API()
        for bdm in bdms:
            if bdm['volume_id'] and bdm['delete_on_termination']:
                volume_api.delete(context, bdm['volume_id'])
            db.block_device_mapping_destroy(context, bdm['id'])

View File

@@ -61,7 +61,7 @@ class ComputeDriver(object):
"""Return a list of InstanceInfo for all registered VMs""" """Return a list of InstanceInfo for all registered VMs"""
raise NotImplementedError() raise NotImplementedError()
def spawn(self, instance, network_info=None): def spawn(self, instance, network_info=None, block_device_mapping=[]):
"""Launch a VM for the specified instance""" """Launch a VM for the specified instance"""
raise NotImplementedError() raise NotImplementedError()

View File

@@ -114,7 +114,7 @@ class FakeConnection(driver.ComputeDriver):
info_list.append(self._map_to_instance_info(instance)) info_list.append(self._map_to_instance_info(instance))
return info_list return info_list
def spawn(self, instance): def spawn(self, instance, network_info=None, block_device_mapping=[]):
""" """
Create a new instance/VM/domain on the virtualization platform. Create a new instance/VM/domain on the virtualization platform.

View File

@@ -139,7 +139,7 @@ class HyperVConnection(driver.ComputeDriver):
return instance_infos return instance_infos
def spawn(self, instance): def spawn(self, instance, network_info=None, block_device_mapping=[]):
""" Create a new VM and start it.""" """ Create a new VM and start it."""
vm = self._lookup(instance.name) vm = self._lookup(instance.name)
if vm is not None: if vm is not None:

View File

@@ -67,11 +67,13 @@
<target dev='${disk_prefix}b' bus='${disk_bus}'/> <target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk> </disk>
#else #else
#if not ($getVar('ebs_root', False))
<disk type='file'> <disk type='file'>
<driver type='${driver_type}'/> <driver type='${driver_type}'/>
<source file='${basepath}/disk'/> <source file='${basepath}/disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/> <target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk> </disk>
#end if
#if $getVar('local', False) #if $getVar('local', False)
<disk type='file'> <disk type='file'>
<driver type='${driver_type}'/> <driver type='${driver_type}'/>
@@ -79,6 +81,13 @@
<target dev='${disk_prefix}b' bus='${disk_bus}'/> <target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk> </disk>
#end if #end if
#for $vol in $volumes
<disk type='block'>
<driver type='raw'/>
<source dev='${vol.device_path}'/>
<target dev='${vol.mount_device}' bus='${disk_bus}'/>
</disk>
#end for
#end if #end if
#end if #end if

View File

@@ -39,6 +39,7 @@ Supports KVM, LXC, QEMU, UML, and XEN.
import multiprocessing import multiprocessing
import os import os
import random import random
import re
import shutil import shutil
import subprocess import subprocess
import sys import sys
@@ -207,6 +208,8 @@ def _get_network_info(instance):
network_info.append((network, mapping)) network_info.append((network, mapping))
return network_info return network_info
def _strip_dev(mount_path):
return re.sub(r'^/dev/', '', mount_path)
class LibvirtConnection(driver.ComputeDriver): class LibvirtConnection(driver.ComputeDriver):
@@ -619,11 +622,13 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics # NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet) # for xenapi(tr3buchet)
@exception.wrap_exception @exception.wrap_exception
def spawn(self, instance, network_info=None): def spawn(self, instance, network_info=None, block_device_mapping=[]):
xml = self.to_xml(instance, False, network_info) xml = self.to_xml(instance, False, network_info=network_info,
block_device_mapping=block_device_mapping)
self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info)
self._create_image(instance, xml, network_info=network_info) self._create_image(instance, xml, network_info=network_info,
block_device_mapping=block_device_mapping)
domain = self._create_new_domain(xml) domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name']) LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance) self.firewall_driver.apply_instance_filter(instance)
@@ -805,7 +810,7 @@ class LibvirtConnection(driver.ComputeDriver):
# TODO(vish): should we format disk by default? # TODO(vish): should we format disk by default?
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
network_info=None): network_info=None, block_device_mapping=[]):
if not network_info: if not network_info:
network_info = _get_network_info(inst) network_info = _get_network_info(inst)
@@ -868,6 +873,8 @@ class LibvirtConnection(driver.ComputeDriver):
size = None size = None
root_fname += "_sm" root_fname += "_sm"
if not self._volume_in_mapping(self.root_mount_device,
block_device_mapping):
self._cache_image(fn=self._fetch_image, self._cache_image(fn=self._fetch_image,
target=basepath('disk'), target=basepath('disk'),
fname=root_fname, fname=root_fname,
@@ -877,7 +884,8 @@ class LibvirtConnection(driver.ComputeDriver):
project=project, project=project,
size=size) size=size)
if inst_type['local_gb']: if inst_type['local_gb'] and not self._volume_in_mapping(
self.local_mount_device, block_device_mapping):
self._cache_image(fn=self._create_local, self._cache_image(fn=self._create_local,
target=basepath('disk.local'), target=basepath('disk.local'),
fname="local_%s" % inst_type['local_gb'], fname="local_%s" % inst_type['local_gb'],
@@ -992,7 +1000,18 @@ class LibvirtConnection(driver.ComputeDriver):
return result return result
def _prepare_xml_info(self, instance, rescue=False, network_info=None): root_mount_device = 'vda' # FIXME for now. it's hard coded.
local_mount_device = 'vdb' # FIXME for now. it's hard coded.
def _volume_in_mapping(self, mount_device, block_device_mapping):
mount_device_ = _strip_dev(mount_device)
for vol in block_device_mapping:
vol_mount_device = _strip_dev(vol['mount_device'])
if vol_mount_device == mount_device_:
return True
return False
def _prepare_xml_info(self, instance, rescue=False, network_info=None,
block_device_mapping=[]):
# TODO(adiantum) remove network_info creation code # TODO(adiantum) remove network_info creation code
# when multinics will be completed # when multinics will be completed
if not network_info: if not network_info:
@@ -1010,6 +1029,16 @@ class LibvirtConnection(driver.ComputeDriver):
else: else:
driver_type = 'raw' driver_type = 'raw'
for vol in block_device_mapping:
vol['mount_device'] = _strip_dev(vol['mount_device'])
ebs_root = self._volume_in_mapping(self.root_mount_device,
block_device_mapping)
if self._volume_in_mapping(self.local_mount_device,
block_device_mapping):
local_gb = False
else:
local_gb = inst_type['local_gb']
xml_info = {'type': FLAGS.libvirt_type, xml_info = {'type': FLAGS.libvirt_type,
'name': instance['name'], 'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path, 'basepath': os.path.join(FLAGS.instances_path,
@@ -1017,9 +1046,11 @@ class LibvirtConnection(driver.ComputeDriver):
'memory_kb': inst_type['memory_mb'] * 1024, 'memory_kb': inst_type['memory_mb'] * 1024,
'vcpus': inst_type['vcpus'], 'vcpus': inst_type['vcpus'],
'rescue': rescue, 'rescue': rescue,
'local': inst_type['local_gb'], 'local': local_gb,
'driver_type': driver_type, 'driver_type': driver_type,
'nics': nics} 'nics': nics,
'ebs_root': ebs_root,
'volumes': block_device_mapping}
if FLAGS.vnc_enabled: if FLAGS.vnc_enabled:
if FLAGS.libvirt_type != 'lxc': if FLAGS.libvirt_type != 'lxc':
@@ -1034,10 +1065,12 @@ class LibvirtConnection(driver.ComputeDriver):
xml_info['disk'] = xml_info['basepath'] + "/disk" xml_info['disk'] = xml_info['basepath'] + "/disk"
return xml_info return xml_info
def to_xml(self, instance, rescue=False, network_info=None): def to_xml(self, instance, rescue=False, network_info=None,
block_device_mapping=[]):
# TODO(termie): cache? # TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name']) LOG.debug(_('instance %s: starting toXML method'), instance['name'])
xml_info = self._prepare_xml_info(instance, rescue, network_info) xml_info = self._prepare_xml_info(instance, rescue, network_info,
block_device_mapping)
xml = str(Template(self.libvirt_xml, searchList=[xml_info])) xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
LOG.debug(_('instance %s: finished toXML method'), instance['name']) LOG.debug(_('instance %s: finished toXML method'), instance['name'])
return xml return xml

View File

@@ -124,7 +124,7 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances.""" """List VM instances."""
return self._vmops.list_instances() return self._vmops.list_instances()
def spawn(self, instance): def spawn(self, instance, network_info=None, block_device_mapping=[]):
"""Create VM instance.""" """Create VM instance."""
self._vmops.spawn(instance) self._vmops.spawn(instance)

View File

@@ -194,7 +194,7 @@ class XenAPIConnection(driver.ComputeDriver):
def list_instances_detail(self): def list_instances_detail(self):
return self._vmops.list_instances_detail() return self._vmops.list_instances_detail()
def spawn(self, instance): def spawn(self, instance, network_info=None, block_device_mapping=[]):
"""Create VM instance""" """Create VM instance"""
self._vmops.spawn(instance) self._vmops.spawn(instance)

View File

@@ -22,6 +22,8 @@ Handles all requests relating to volumes.
import datetime import datetime
from eventlet import greenthread
from nova import db from nova import db
from nova import exception from nova import exception
from nova import flags from nova import flags
@@ -74,6 +76,14 @@ class API(base.Base):
"snapshot_id": snapshot_id}}) "snapshot_id": snapshot_id}})
return volume return volume
# TODO(yamahata): eliminate dumb polling
def wait_creation(self, context, volume_id):
while True:
volume = self.get(context, volume_id)
if volume['status'] != 'creating':
return
greenthread.sleep(1)
def delete(self, context, volume_id): def delete(self, context, volume_id):
volume = self.get(context, volume_id) volume = self.get(context, volume_id)
if volume['status'] != "available": if volume['status'] != "available":