Rebased to nova revision 752.

sateesh 2011-03-02 01:24:34 +05:30
commit 7d3b9cae71
16 changed files with 746 additions and 192 deletions

View File

@@ -40,4 +40,5 @@
<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
<naveedm9@gmail.com> <naveed.massjouni@rackspace.com>
<vishvananda@gmail.com> <vishvananda@yahoo.com>

View File

@@ -46,7 +46,8 @@ Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
Muneyuki Noguchi <noguchimn@nttdata.co.jp>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
Naveed Massjouni <naveed.massjouni@rackspace.com>
Naveed Massjouni <naveedm9@gmail.com>
Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
Paul Voccio <paul@openstack.org>
Ricardo Carrillo Cruz <emaildericky@gmail.com>
Rick Clark <rick@openstack.org>

View File

@@ -890,7 +890,6 @@ class CloudController(object):
raise exception.ApiError(_('attribute not supported: %s')
% attribute)
try:
image = self.image_service.show(context, image_id)
image = self._format_image(context,
self.image_service.show(context,
image_id))

View File

@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import json
import traceback
@@ -50,7 +51,8 @@ def _translate_detail_keys(inst):
power_state.PAUSED: 'paused',
power_state.SHUTDOWN: 'active',
power_state.SHUTOFF: 'active',
power_state.CRASHED: 'error'}
power_state.CRASHED: 'error',
power_state.FAILED: 'error'}
inst_dict = {}
mapped_keys = dict(status='state', imageId='image_id',
@@ -70,14 +72,16 @@ def _translate_detail_keys(inst):
public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
inst_dict['addresses']['public'] = public_ips
inst_dict['hostId'] = ''
# Return the metadata as a dictionary
metadata = {}
for item in inst['metadata']:
metadata[item['key']] = item['value']
inst_dict['metadata'] = metadata
inst_dict['hostId'] = ''
if inst['host']:
inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest()
return dict(server=inst_dict)
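The hostId logic above reduces to one property: instances on the same compute host report the same opaque digest, distinct hosts report distinct digests, and an unscheduled instance reports the empty string. A minimal standalone sketch of that property (plain Python 2 to match the tree; under Python 3 the host name would need encoding before hashing):

import hashlib

def host_id_for(host):
    # '' while unscheduled; otherwise an opaque, stable digest of the host
    return hashlib.sha224(host).hexdigest() if host else ''

assert host_id_for('compute1') == host_id_for('compute1')
assert host_id_for('compute1') != host_id_for('compute2')
assert host_id_for(None) == ''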
@@ -135,25 +139,6 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
def _get_kernel_ramdisk_from_image(self, req, image_id):
"""
Machine images are associated with Kernels and Ramdisk images via
metadata stored in Glance as 'image_properties'
"""
def lookup(param):
_image_id = image_id
try:
return image['properties'][param]
except KeyError:
LOG.debug(
_("%(param)s property not found for image %(_image_id)s") %
locals())
return None
image_id = str(image_id)
image = self._image_service.show(req.environ['nova.context'], image_id)
return lookup('kernel_id'), lookup('ramdisk_id')
def create(self, req):
""" Creates a new server for a given user """
env = self._deserialize(req.body, req)
@@ -377,3 +362,37 @@ class Controller(wsgi.Controller):
action=item.action,
error=item.error))
return dict(actions=actions)
def _get_kernel_ramdisk_from_image(self, req, image_id):
"""Retrevies kernel and ramdisk IDs from Glance
Only 'machine' (ami) type use kernel and ramdisk outside of the
image.
"""
# FIXME(sirp): Since we're retrieving the kernel_id from an
# image_property, this means only Glance is supported.
# The BaseImageService needs to expose a consistent way of accessing
# kernel_id and ramdisk_id
image = self._image_service.show(req.environ['nova.context'], image_id)
if image['status'] != 'active':
raise exception.Invalid(
_("Cannot build from image %(image_id)s, status not active") %
locals())
if image['type'] != 'machine':
return None, None
try:
kernel_id = image['properties']['kernel_id']
except KeyError:
raise exception.NotFound(
_("Kernel not found for image %(image_id)s") % locals())
try:
ramdisk_id = image['properties']['ramdisk_id']
except KeyError:
raise exception.NotFound(
_("Ramdisk not found for image %(image_id)s") % locals())
return kernel_id, ramdisk_id
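The method above encodes a small decision table over Glance metadata: non-active images are rejected, non-machine images return (None, None), and machine images must carry both property IDs. A rough illustrative mirror of those rules (the dict shape and ID values are assumptions for the sketch, not the image service's exact return):

def kernel_ramdisk_for(image):
    if image['status'] != 'active':
        raise ValueError('image not active')        # -> exception.Invalid
    if image['type'] != 'machine':
        return None, None     # raw/vhd images carry no external kernel
    props = image['properties']
    return props['kernel_id'], props['ramdisk_id']  # KeyError -> NotFound

image = {'status': 'active', 'type': 'machine',
         'properties': {'kernel_id': 5, 'ramdisk_id': 6}}
assert kernel_ramdisk_for(image) == (5, 6)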

View File

@@ -129,6 +129,7 @@ class API(base.Base):
kernel_id = image.get('kernel_id', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdisk_id', None)
# FIXME(sirp): is there a way we can remove null_kernel?
# No kernel and ramdisk for raw images
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None

View File

@@ -45,6 +45,6 @@ def get_by_type(instance_type):
def get_by_flavor_id(flavor_id):
for instance_type, details in INSTANCE_TYPES.iteritems():
if details['flavorid'] == flavor_id:
if details['flavorid'] == int(flavor_id):
return instance_type
return FLAGS.default_instance_type
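The int() cast above matters because flavor ids arrive as strings when parsed out of a REST URL, while INSTANCE_TYPES stores integers; without the cast, "1" == 1 is False and every string lookup falls through to the default. A toy sketch of the fixed behavior (dict shape borrowed from nova; the default return is a stand-in for FLAGS.default_instance_type):

INSTANCE_TYPES = {'m1.tiny': {'flavorid': 1}}

def get_by_flavor_id(flavor_id):
    for name, details in INSTANCE_TYPES.items():
        if details['flavorid'] == int(flavor_id):   # "1" from a URL == 1
            return name
    return 'm1.small'   # stand-in for the default flag

assert get_by_flavor_id(1) == get_by_flavor_id("1") == 'm1.tiny'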

View File

@@ -297,11 +297,45 @@ class ServersTest(test.TestCase):
i = 0
for s in res_dict['servers']:
self.assertEqual(s['id'], i)
self.assertEqual(s['hostId'], '')
self.assertEqual(s['name'], 'server%d' % i)
self.assertEqual(s['imageId'], 10)
self.assertEqual(s['metadata']['seq'], i)
i += 1
def test_get_all_server_details_with_host(self):
'''
We want to make sure that if two instances are on the same host, then
they return the same hostId. If two instances are on different hosts,
they should return different hostIds. In this test, there are 5
instances: 2 on one host and 3 on another.
'''
def stub_instance(id, user_id=1):
return Instance(id=id, state=0, image_id=10, user_id=user_id,
display_name='server%s' % id, host='host%s' % (id % 2))
def return_servers_with_host(context, user_id=1):
return [stub_instance(i) for i in xrange(5)]
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers_with_host)
req = webob.Request.blank('/v1.0/servers/detail')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
server_list = res_dict['servers']
host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
self.assertTrue(host_ids[0] and host_ids[1])
self.assertNotEqual(host_ids[0], host_ids[1])
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], i)
self.assertEqual(s['hostId'], host_ids[i % 2])
self.assertEqual(s['name'], 'server%d' % i)
self.assertEqual(s['imageId'], 10)
def test_server_pause(self):
FLAGS.allow_admin_api = True
body = dict(server=dict(

View File

@@ -26,12 +26,40 @@ def stubout_glance_client(stubs, cls):
class FakeGlance(object):
IMAGE_MACHINE = 1
IMAGE_KERNEL = 2
IMAGE_RAMDISK = 3
IMAGE_RAW = 4
IMAGE_VHD = 5
IMAGE_FIXTURES = {
IMAGE_MACHINE: {
'image_meta': {'name': 'fakemachine', 'size': 0,
'type': 'machine'},
'image_data': StringIO.StringIO('')},
IMAGE_KERNEL: {
'image_meta': {'name': 'fakekernel', 'size': 0,
'type': 'kernel'},
'image_data': StringIO.StringIO('')},
IMAGE_RAMDISK: {
'image_meta': {'name': 'fakeramdisk', 'size': 0,
'type': 'ramdisk'},
'image_data': StringIO.StringIO('')},
IMAGE_RAW: {
'image_meta': {'name': 'fakeraw', 'size': 0,
'type': 'raw'},
'image_data': StringIO.StringIO('')},
IMAGE_VHD: {
'image_meta': {'name': 'fakevhd', 'size': 0,
'type': 'vhd'},
'image_data': StringIO.StringIO('')}}
def __init__(self, host, port=None, use_ssl=False):
pass
def get_image(self, image):
meta = {
'size': 0,
}
image_file = StringIO.StringIO('')
return meta, image_file
def get_image_meta(self, image_id):
return self.IMAGE_FIXTURES[image_id]['image_meta']
def get_image(self, image_id):
image = self.IMAGE_FIXTURES[image_id]
return image['image_meta'], image['image_data']

View File

@@ -30,6 +30,7 @@ from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types
LOG = logging.getLogger('nova.tests.compute')
@@ -266,3 +267,10 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(ret_val, None)
self.compute.terminate_instance(self.context, instance_id)
def test_get_by_flavor_id(self):
type = instance_types.get_by_flavor_id(1)
self.assertEqual(type, 'm1.tiny')
type = instance_types.get_by_flavor_id("1")
self.assertEqual(type, 'm1.tiny')

View File

@@ -31,6 +31,7 @@ from nova.compute import power_state
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import SimpleDH
from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
@@ -284,11 +285,17 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_raw_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(1, None, None)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
def test_spawn_vhd_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None)
def test_spawn_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(1, 2, 3)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
@@ -337,3 +344,63 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
def tearDown(self):
super(XenAPIDiffieHellmanTestCase, self).tearDown()
class XenAPIDetermineDiskImageTestCase(test.TestCase):
"""
Unit tests for code that detects the ImageType
"""
def setUp(self):
super(XenAPIDetermineDiskImageTestCase, self).setUp()
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
class FakeInstance(object):
pass
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
def assert_disk_type(self, disk_type):
dt = vm_utils.VMHelper.determine_disk_image_type(
self.fake_instance)
self.assertEqual(disk_type, dt)
def test_instance_disk(self):
"""
If a kernel is specified then the image type is DISK (aka machine)
"""
FLAGS.xenapi_image_service = 'objectstore'
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
self.assert_disk_type(vm_utils.ImageType.DISK)
def test_instance_disk_raw(self):
"""
If the kernel isn't specified, and we're not using Glance, then
DISK_RAW is assumed.
"""
FLAGS.xenapi_image_service = 'objectstore'
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
def test_glance_disk_raw(self):
"""
If we're using Glance, then defer to the image_type field, which in
this case will be 'raw'.
"""
FLAGS.xenapi_image_service = 'glance'
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
def test_glance_disk_vhd(self):
"""
If we're using Glance, then defer to the image_type field, which in
this case will be 'vhd'.
"""
FLAGS.xenapi_image_service = 'glance'
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)

View File

@@ -177,6 +177,12 @@ class FakeSessionForVMTests(fake.SessionBase):
def VM_destroy(self, session_ref, vm_ref):
fake.destroy_vm(vm_ref)
def SR_scan(self, session_ref, sr_ref):
pass
def VDI_set_name_label(self, session_ref, vdi_ref, name_label):
pass
class FakeSessionForVolumeTests(fake.SessionBase):
""" Stubs out a XenAPISession for Volume tests """

View File

@@ -24,6 +24,7 @@ import pickle
import re
import time
import urllib
import uuid
from xml.dom import minidom
from eventlet import event
@@ -63,11 +64,14 @@ class ImageType:
0 - kernel/ramdisk image (goes on dom0's filesystem)
1 - disk image (local SR, partitioned by objectstore plugin)
2 - raw disk image (local SR, NOT partitioned by plugin)
3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
linux, HVM assumed for Windows)
"""
KERNEL_RAMDISK = 0
DISK = 1
DISK_RAW = 2
DISK_VHD = 3
class VMHelper(HelperBase):
@@ -276,29 +280,35 @@ class VMHelper(HelperBase):
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
#TODO(sirp): we need to assert only one parent, not parents two deep
return template_vm_ref, [template_vdi_uuid, parent_uuid]
template_vdi_uuids = {'image': parent_uuid,
'snap': template_vdi_uuid}
return template_vm_ref, template_vdi_uuids
@classmethod
def upload_image(cls, session, instance_id, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance under the given image ID.
"""
# NOTE(sirp): Currently we only support uploading images as VHD, there
# is no RAW equivalent (yet)
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': FLAGS.glance_host,
'glance_port': FLAGS.glance_port}
'glance_port': FLAGS.glance_port,
'sr_path': get_sr_path(session)}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'put_vdis', kwargs)
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
session.wait_for_task(instance_id, task)
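upload_vhd (and download_vhd below) share one calling convention with the host: XenAPI plugin arguments must be a flat string-to-string dict, so every real parameter is folded into a single pickled 'params' entry. A sketch of both halves of that convention (the session argument and field values are stand-ins):

import pickle

# host side: one pickled blob instead of many typed arguments
params = {'vdi_uuids': {'image': 'aaaa', 'snap': 'bbbb'},
          'image_id': 42, 'glance_host': 'glance', 'glance_port': 9292,
          'sr_path': '/var/run/sr-mount/some-sr-uuid'}
kwargs = {'params': pickle.dumps(params)}

# plugin side (dom0): unpickle and pull the fields back out
def upload_vhd(session, args):
    params = pickle.loads(args['params'])
    return params['image_id']

assert upload_vhd(None, kwargs) == 42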
@classmethod
def fetch_image(cls, session, instance_id, image, user, project, type):
def fetch_image(cls, session, instance_id, image, user, project,
image_type):
"""
type is interpreted as an ImageType instance
image_type is interpreted as an ImageType instance
Related flags:
xenapi_image_service = ['glance', 'objectstore']
glance_address = 'address for glance services'
@@ -308,35 +318,80 @@ class VMHelper(HelperBase):
if FLAGS.xenapi_image_service == 'glance':
return cls._fetch_image_glance(session, instance_id, image,
access, type)
access, image_type)
else:
return cls._fetch_image_objectstore(session, instance_id, image,
access, user.secret, type)
access, user.secret,
image_type)
@classmethod
def _fetch_image_glance(cls, session, instance_id, image, access, type):
sr = find_sr(session)
if sr is None:
raise exception.NotFound('Cannot find SR to write VDI to')
def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
image_type):
LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
% locals())
c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
sr_ref = safe_find_sr(session)
meta, image_file = c.get_image(image)
# NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
# have the `uuid` module. To work around this, we generate the uuids
# here (under Python 2.6+) and pass them as arguments
uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
params = {'image_id': image,
'glance_host': FLAGS.glance_host,
'glance_port': FLAGS.glance_port,
'uuid_stack': uuid_stack,
'sr_path': get_sr_path(session)}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'download_vhd', kwargs)
vdi_uuid = session.wait_for_task(instance_id, task)
scan_sr(session, instance_id, sr_ref)
# Set the name-label to ease debugging
vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
name_label = get_name_label_for_image(image)
session.get_xenapi().VDI.set_name_label(vdi_ref, name_label)
LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s")
% locals())
return vdi_uuid
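The uuid_stack above exists because the plugin runs under dom0's Python 2.4, which lacks the uuid module; the compute host pre-generates the UUIDs and the plugin simply pops them off as it renames VHDs. A sketch of the hand-off (values invented; only the count and pop order matter):

import uuid

# compute host (Python 2.6+): one UUID each for image.vhd and snap.vhd
uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]

# plugin (Python 2.4): consume one per VHD renamed into the SR
def next_vhd_name(stack):
    return "%s.vhd" % stack.pop()

assert next_vhd_name(uuid_stack) != next_vhd_name(uuid_stack)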
@classmethod
def _fetch_image_glance_disk(cls, session, instance_id, image, access,
image_type):
"""Fetch the image from Glance
NOTE:
Unlike _fetch_image_glance_vhd, this method does not use the Glance
plugin; instead, it streams the disks through domU to the VDI
directly.
"""
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for VHD, RAW, and
# DISK restores
sr_ref = safe_find_sr(session)
client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
meta, image_file = client.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
if type == ImageType.DISK:
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
vdi_size, False)
name_label = get_name_label_for_image(image)
vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
with_vdi_attached_here(session, vdi, False,
lambda dev:
_stream_disk(dev, type,
_stream_disk(dev, image_type,
virtual_size, image_file))
if (type == ImageType.KERNEL_RAMDISK):
if image_type == ImageType.KERNEL_RAMDISK:
#we need to invoke a plugin for copying the VDI's
#content into the proper path
LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
@@ -354,21 +409,88 @@ class VMHelper(HelperBase):
else:
return session.get_xenapi().VDI.get_uuid(vdi)
@classmethod
def determine_disk_image_type(cls, instance):
"""Disk Image Types are used to determine where the kernel will reside
within an image. To figure out which type we're dealing with, we use
the following rules:
1. If we're using Glance, we can use the image's 'type' field to
determine the disk image type
2. If we're not using Glance, then we need to deduce this based on
whether a kernel_id is specified.
"""
def log_disk_format(image_type):
pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
ImageType.DISK: 'DISK',
ImageType.DISK_RAW: 'DISK_RAW',
ImageType.DISK_VHD: 'DISK_VHD'}
disk_format = pretty_format[image_type]
image_id = instance.image_id
instance_id = instance.id
LOG.debug(_("Detected %(disk_format)s format for image "
"%(image_id)s, instance %(instance_id)s") % locals())
def determine_from_glance():
glance_type2nova_type = {'machine': ImageType.DISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD,
'kernel': ImageType.KERNEL_RAMDISK,
'ramdisk': ImageType.KERNEL_RAMDISK}
client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
meta = client.get_image_meta(instance.image_id)
type_ = meta['type']
try:
return glance_type2nova_type[type_]
except KeyError:
raise exception.NotFound(
_("Unrecognized image type '%(type_)s'") % locals())
def determine_from_instance():
if instance.kernel_id:
return ImageType.DISK
else:
return ImageType.DISK_RAW
# FIXME(sirp): can we unify the ImageService and xenapi_image_service
# abstractions?
if FLAGS.xenapi_image_service == 'glance':
image_type = determine_from_glance()
else:
image_type = determine_from_instance()
log_disk_format(image_type)
return image_type
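Condensed, the rules above are a lookup table for Glance plus a kernel_id check for objectstore. An illustrative reduction (string names stand in for the ImageType constants):

GLANCE_TYPE_MAP = {'machine': 'DISK', 'raw': 'DISK_RAW', 'vhd': 'DISK_VHD',
                   'kernel': 'KERNEL_RAMDISK', 'ramdisk': 'KERNEL_RAMDISK'}

def disk_image_type(image_service, image_meta, kernel_id):
    if image_service == 'glance':
        return GLANCE_TYPE_MAP[image_meta['type']]  # KeyError -> NotFound
    # objectstore: deduce from whether a kernel was supplied
    return 'DISK' if kernel_id else 'DISK_RAW'

assert disk_image_type('glance', {'type': 'vhd'}, None) == 'DISK_VHD'
assert disk_image_type('objectstore', None, 5) == 'DISK'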
@classmethod
def _fetch_image_glance(cls, session, instance_id, image, access,
image_type):
if image_type == ImageType.DISK_VHD:
return cls._fetch_image_glance_vhd(
session, instance_id, image, access, image_type)
else:
return cls._fetch_image_glance_disk(
session, instance_id, image, access, image_type)
@classmethod
def _fetch_image_objectstore(cls, session, instance_id, image, access,
secret, type):
secret, image_type):
url = images.image_url(image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
if image_type == ImageType.KERNEL_RAMDISK:
fn = 'get_kernel'
else:
fn = 'get_vdi'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
if type != ImageType.KERNEL_RAMDISK:
if image_type != ImageType.KERNEL_RAMDISK:
args['add_partition'] = 'true'
if type == ImageType.DISK_RAW:
if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
uuid = session.wait_for_task(instance_id, task)
@@ -376,6 +498,9 @@ class VMHelper(HelperBase):
@classmethod
def lookup_image(cls, session, instance_id, vdi_ref):
"""
Determine if VDI is using a PV kernel
"""
if FLAGS.xenapi_image_service == 'glance':
return cls._lookup_image_glance(session, vdi_ref)
else:
@@ -587,7 +712,18 @@ def get_vdi_for_vm_safely(session, vm_ref):
return vdi_ref, vdi_rec
def safe_find_sr(session):
"""Same as find_sr except raises a NotFound exception if SR cannot be
determined
"""
sr_ref = find_sr(session)
if sr_ref is None:
raise exception.NotFound(_('Cannot find SR to read/write VDI'))
return sr_ref
def find_sr(session):
"""Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
srs = session.get_xenapi().SR.get_all()
for sr in srs:
@@ -602,6 +738,18 @@ def find_sr(session):
return None
def get_sr_path(session):
"""Return the path to our storage repository
This is used when we're dealing with VHDs directly, either by taking
snapshots or by restoring an image in the DISK_VHD format.
"""
sr_ref = safe_find_sr(session)
sr_rec = session.get_xenapi().SR.get_record(sr_ref)
sr_uuid = sr_rec["uuid"]
return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
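The resulting path is where a file-backed SR keeps its VHDs, one <vdi_uuid>.vhd per VDI, which is what lets the glance plugin hard-link and rename them directly. A toy illustration (all names invented):

import os

def vhd_path(sr_base_path, sr_uuid, vdi_uuid):
    # file-backed SRs keep each VDI at <base>/<sr_uuid>/<vdi_uuid>.vhd
    return os.path.join(sr_base_path, sr_uuid, '%s.vhd' % vdi_uuid)

assert (vhd_path('/var/run/sr-mount', 'sr0', 'vdi0')
        == '/var/run/sr-mount/sr0/vdi0.vhd')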
def remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
@@ -715,9 +863,9 @@ def _is_vdi_pv(dev):
return False
def _stream_disk(dev, type, virtual_size, image_file):
def _stream_disk(dev, image_type, virtual_size, image_file):
offset = 0
if type == ImageType.DISK:
if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(virtual_size, dev)
@@ -746,3 +894,8 @@ def _write_partition(virtual_size, dev):
(dest, primary_first, primary_last))
LOG.debug(_('Writing partition table %s done.'), dest)
def get_name_label_for_image(image):
# TODO(sirp): This should eventually be the URI for the Glance image
return _('Glance image %s') % image

View File

@@ -80,27 +80,33 @@ class VMOps(object):
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
#if kernel is not present we must download a raw disk
if instance.kernel_id:
disk_image_type = ImageType.DISK
else:
disk_image_type = ImageType.DISK_RAW
disk_image_type = VMHelper.determine_disk_image_type(instance)
vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
instance.image_id, user, project, disk_image_type)
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
#Have a look at the VDI and see if it has a PV kernel
pv_kernel = False
if not instance.kernel_id:
if disk_image_type == ImageType.DISK_RAW:
#Have a look at the VDI and see if it has a PV kernel
pv_kernel = VMHelper.lookup_image(self._session, instance.id,
vdi_ref)
elif disk_image_type == ImageType.DISK_VHD:
# TODO(sirp): Assuming PV for now; this will need to be
# configurable as Windows will use HVM.
pv_kernel = True
kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
ramdisk = None
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
vm_ref = VMHelper.create_vm(self._session,
instance, kernel, ramdisk, pv_kernel)
VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
@@ -239,7 +245,8 @@ class VMOps(object):
VMHelper.upload_image(
self._session, instance.id, template_vdi_uuids, image_id)
finally:
self._destroy(instance, template_vm_ref, shutdown=False)
self._destroy(instance, template_vm_ref, shutdown=False,
destroy_kernel_ramdisk=False)
logging.debug(_("Finished snapshot and upload for VM %s"), instance)
@@ -321,6 +328,9 @@ class VMOps(object):
locals())
return
instance_id = instance.id
LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
% locals())
try:
task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
self._session.wait_for_task(instance.id, task)
@@ -329,6 +339,9 @@ class VMOps(object):
def _destroy_vdis(self, instance, vm):
"""Destroys all VDIs associated with a VM """
instance_id = instance.id
LOG.debug(_("Destroying VDIs for Instance %(instance_id)s")
% locals())
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
if not vdis:
@@ -341,29 +354,56 @@ class VMOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
def _destroy_kernel_ramdisk(self, instance, vm):
"""
Three situations can occur:
1. We have neither a ramdisk nor a kernel, in which case we are a
RAW image and can omit this step
2. We have one or the other, in which case, we should flag as an
error
3. We have both, in which case we safely remove both the kernel
and the ramdisk.
"""
instance_id = instance.id
if not instance.kernel_id and not instance.ramdisk_id:
# 1. No kernel or ramdisk
LOG.debug(_("Instance %(instance_id)s using RAW or VHD, "
"skipping kernel and ramdisk deletion") % locals())
return
if not (instance.kernel_id and instance.ramdisk_id):
# 2. We only have kernel xor ramdisk
raise exception.NotFound(
_("Instance %(instance_id)s has a kernel or ramdisk but not "
"both" % locals()))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
self._session, vm)
LOG.debug(_("Removing kernel/ramdisk files"))
args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
task = self._session.async_call_plugin(
'glance', 'remove_kernel_ramdisk', args)
self._session.wait_for_task(instance.id, task)
LOG.debug(_("kernel/ramdisk files removed"))
def _destroy_vm(self, instance, vm):
"""Destroys a VM record """
instance_id = instance.id
try:
kernel = None
ramdisk = None
if instance.kernel_id or instance.ramdisk_id:
(kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
self._session, vm)
task1 = self._session.call_xenapi('Async.VM.destroy', vm)
LOG.debug(_("Removing kernel/ramdisk files"))
fn = "remove_kernel_ramdisk"
args = {}
if kernel:
args['kernel-file'] = kernel
if ramdisk:
args['ramdisk-file'] = ramdisk
task2 = self._session.async_call_plugin('glance', fn, args)
self._session.wait_for_task(instance.id, task1)
self._session.wait_for_task(instance.id, task2)
LOG.debug(_("kernel/ramdisk files removed"))
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance_id, task)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
def destroy(self, instance):
"""
Destroy VM instance
@@ -371,26 +411,31 @@ class VMOps(object):
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
instance_id = instance.id
LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
vm = VMHelper.lookup(self._session, instance.name)
return self._destroy(instance, vm, shutdown=True)
def _destroy(self, instance, vm, shutdown=True):
def _destroy(self, instance, vm, shutdown=True,
destroy_kernel_ramdisk=True):
"""
Destroys VM instance by performing:
1. A shutdown if requested
2. Destroying associated VDIs
3. Destroying that actual VM record
1. A shutdown if requested
2. Destroying associated VDIs
3. Destroying kernel and ramdisk files (if necessary)
4. Destroying the actual VM record
"""
if vm is None:
# Don't complain, just return. This lets us clean up instances
# that have already disappeared from the underlying platform.
LOG.warning(_("VM is not present, skipping destroy..."))
return
if shutdown:
self._shutdown(instance, vm)
self._destroy_vdis(instance, vm)
if destroy_kernel_ramdisk:
self._destroy_kernel_ramdisk(instance, vm)
self._destroy_vm(instance, vm)
def _wait_with_callback(self, instance_id, task, callback):

View File

@@ -100,6 +100,8 @@ flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts',
5,
'Max number of times to poll for VHD to coalesce.'
' Used only if connection_type=xenapi.')
flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount',
'Base path to the storage repository')
flags.DEFINE_string('target_host',
None,
'iSCSI Target Host')

View File

@@ -21,17 +21,14 @@
# XenAPI plugin for managing glance images
#
import base64
import errno
import hmac
import httplib
import os
import os.path
import pickle
import sha
import shlex
import shutil
import subprocess
import time
import urlparse
import tempfile
import XenAPIPlugin
@@ -41,30 +38,6 @@ configure_logging('glance')
CHUNK_SIZE = 8192
KERNEL_DIR = '/boot/guest'
FILE_SR_PATH = '/var/run/sr-mount'
def remove_kernel_ramdisk(session, args):
"""Removes kernel and/or ramdisk from dom0's file system"""
kernel_file = exists(args, 'kernel-file')
ramdisk_file = exists(args, 'ramdisk-file')
if kernel_file:
os.remove(kernel_file)
if ramdisk_file:
os.remove(ramdisk_file)
return "ok"
def copy_kernel_vdi(session, args):
vdi = exists(args, 'vdi-ref')
size = exists(args, 'image-size')
#Use the uuid as a filename
vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
filename = with_vdi_in_dom0(session, vdi, False,
lambda dev:
_copy_kernel_vdi('/dev/%s' % dev, copy_args))
return filename
def _copy_kernel_vdi(dest, copy_args):
@@ -89,93 +62,310 @@ def _copy_kernel_vdi(dest, copy_args):
return filename
def put_vdis(session, args):
def _download_tarball(sr_path, staging_path, image_id, glance_host,
glance_port):
"""Download the tarball image from Glance and extract it into the staging
area.
"""
conn = httplib.HTTPConnection(glance_host, glance_port)
conn.request('GET', '/images/%s' % image_id)
resp = conn.getresponse()
if resp.status == httplib.NOT_FOUND:
raise Exception("Image '%s' not found in Glance" % image_id)
elif resp.status != httplib.OK:
raise Exception("Unexpected response from Glance %i" % res.status)
tar_cmd = "tar -zx --directory=%(staging_path)s" % locals()
tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True)
chunk = resp.read(CHUNK_SIZE)
while chunk:
tar_proc.stdin.write(chunk)
chunk = resp.read(CHUNK_SIZE)
_finish_subprocess(tar_proc, tar_cmd)
conn.close()
def _fixup_vhds(sr_path, staging_path, uuid_stack):
"""Fixup the downloaded VHDs before we move them into the SR.
We cannot extract VHDs directly into the SR since they don't yet have
UUIDs, aren't properly associated with each other, and would be subject to
a race condition of one file being present and the other not being
downloaded yet.
To avoid these problems, we use a staging area to fixup the VHDs before
moving them into the SR. The steps involved are:
1. Extracting tarball into staging area
2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd')
3. Linking the two VHDs together
4. Pseudo-atomically moving the images into the SR. (It's not really
atomic because it takes place as two os.rename operations; however,
the chance of an SR.scan occurring between the two rename()
invocations is so small that we can safely ignore it)
"""
def rename_with_uuid(orig_path):
"""Rename VHD using UUID so that it will be recognized by SR on a
subsequent scan.
Since Python2.4 doesn't have the `uuid` module, we pass a stack of
pre-computed UUIDs from the compute worker.
"""
orig_dirname = os.path.dirname(orig_path)
uuid = uuid_stack.pop()
new_path = os.path.join(orig_dirname, "%s.vhd" % uuid)
os.rename(orig_path, new_path)
return new_path, uuid
def link_vhds(child_path, parent_path):
"""Use vhd-util to associate the snapshot VHD with its base_copy.
This needs to be done before we move both VHDs into the SR to prevent
the base_copy from being DOA (deleted-on-arrival).
"""
modify_cmd = ("vhd-util modify -n %(child_path)s -p %(parent_path)s"
% locals())
modify_proc = _make_subprocess(modify_cmd, stderr=True)
_finish_subprocess(modify_proc, modify_cmd)
def move_into_sr(orig_path):
"""Move a file into the SR"""
filename = os.path.basename(orig_path)
new_path = os.path.join(sr_path, filename)
os.rename(orig_path, new_path)
return new_path
def assert_vhd_not_hidden(path):
"""
This is a sanity check on the image; if a snap.vhd isn't
present, then the image.vhd better not be marked 'hidden' or it will
be deleted when moved into the SR.
"""
query_cmd = "vhd-util query -n %(path)s -f" % locals()
query_proc = _make_subprocess(query_cmd, stdout=True, stderr=True)
out, err = _finish_subprocess(query_proc, query_cmd)
for line in out.splitlines():
if line.startswith('hidden'):
value = line.split(':')[1].strip()
if value == "1":
raise Exception(
"VHD %(path)s is marked as hidden without child" %
locals())
orig_base_copy_path = os.path.join(staging_path, 'image.vhd')
if not os.path.exists(orig_base_copy_path):
raise Exception("Invalid image: image.vhd not present")
base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path)
vdi_uuid = base_copy_uuid
orig_snap_path = os.path.join(staging_path, 'snap.vhd')
if os.path.exists(orig_snap_path):
snap_path, snap_uuid = rename_with_uuid(orig_snap_path)
vdi_uuid = snap_uuid
# NOTE(sirp): this step is necessary so that an SR scan won't
# delete the base_copy out from under us (since it would be
# orphaned)
link_vhds(snap_path, base_copy_path)
move_into_sr(snap_path)
else:
assert_vhd_not_hidden(base_copy_path)
move_into_sr(base_copy_path)
return vdi_uuid
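A self-contained simulation of the fixup flow above, using empty files in place of real VHDs (UUID values invented; the vhd-util re-parenting step is only noted in a comment, since it needs a real XenServer host):

import os
import tempfile

sr_path = tempfile.mkdtemp()                  # stands in for the real SR
staging_path = tempfile.mkdtemp(dir=sr_path)  # staging inside the SR
uuid_stack = ['1111-aaaa', '2222-bbbb']       # pre-generated on the host

for name in ('image.vhd', 'snap.vhd'):        # "downloaded" placeholders
    open(os.path.join(staging_path, name), 'w').close()

def rename_with_uuid(path):
    new = os.path.join(os.path.dirname(path), '%s.vhd' % uuid_stack.pop())
    os.rename(path, new)
    return new

base_copy = rename_with_uuid(os.path.join(staging_path, 'image.vhd'))
snap = rename_with_uuid(os.path.join(staging_path, 'snap.vhd'))
# (the real code runs `vhd-util modify` here to re-parent snap onto
# base_copy before either file becomes visible to an SR.scan)
for path in (snap, base_copy):
    os.rename(path, os.path.join(sr_path, os.path.basename(path)))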
def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
"""Hard-link VHDs into staging area with appropriate filename
('snap' or 'image.vhd')
"""
for name, uuid in vdi_uuids.items():
source = os.path.join(sr_path, "%s.vhd" % uuid)
link_name = os.path.join(staging_path, "%s.vhd" % name)
os.link(source, link_name)
def _upload_tarball(staging_path, image_id, glance_host, glance_port):
"""
Create a tarball of the image and then stream that into Glance
using chunked-transfer-encoded HTTP.
"""
conn = httplib.HTTPConnection(glance_host, glance_port)
# NOTE(sirp): httplib under python2.4 won't accept a file-like object
# to request
conn.putrequest('PUT', '/images/%s' % image_id)
# TODO(sirp): make `store` configurable
headers = {
'content-type': 'application/octet-stream',
'transfer-encoding': 'chunked',
'x-image-meta-is_public': 'True',
'x-image-meta-status': 'queued',
'x-image-meta-type': 'vhd'
}
for header, value in headers.iteritems():
conn.putheader(header, value)
conn.endheaders()
tar_cmd = "tar -zc --directory=%(staging_path)s ." % locals()
tar_proc = _make_subprocess(tar_cmd, stdout=True, stderr=True)
chunk = tar_proc.stdout.read(CHUNK_SIZE)
while chunk:
conn.send("%x\r\n%s\r\n" % (len(chunk), chunk))
chunk = tar_proc.stdout.read(CHUNK_SIZE)
conn.send("0\r\n\r\n")
_finish_subprocess(tar_proc, tar_cmd)
resp = conn.getresponse()
if resp.status != httplib.OK:
raise Exception("Unexpected response from Glance %i" % resp.status)
conn.close()
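The "%x\r\n%s\r\n" framing above is HTTP/1.1 chunked transfer encoding: each chunk is its hex length, CRLF, the bytes, CRLF, and a zero-length chunk terminates the body. That is what lets the plugin stream a tarball of unknown size with no Content-Length header. The framing in isolation (no network; tar output replaced by an in-memory buffer):

import StringIO

CHUNK_SIZE = 8192

def chunks(fileobj):
    # yield chunked-encoding frames, then the terminator
    chunk = fileobj.read(CHUNK_SIZE)
    while chunk:
        yield "%x\r\n%s\r\n" % (len(chunk), chunk)
        chunk = fileobj.read(CHUNK_SIZE)
    yield "0\r\n\r\n"

frames = list(chunks(StringIO.StringIO('pretend tarball bytes')))
assert frames[-1] == "0\r\n\r\n"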
def _make_staging_area(sr_path):
"""
The staging area is a place where we can temporarily store and
manipulate VHDs. The use of the staging area is different for upload and
download:
Download
========
When we download the tarball, the VHDs contained within will have names
like "snap.vhd" and "image.vhd". We need to assign UUIDs to them before
moving them into the SR. However, since 'image.vhd' may be a base_copy, we
need to link it to 'snap.vhd' (using vhd-util modify) before moving both
into the SR (otherwise the SR.scan will cause 'image.vhd' to be deleted).
The staging area gives us a place to perform these operations before they
are moved to the SR, scanned, and then registered with XenServer.
Upload
======
On upload, we want to rename the VHDs to reflect what they are, 'snap.vhd'
in the case of the snapshot VHD, and 'image.vhd' in the case of the
base_copy. The staging area provides a directory in which we can create
hard-links to rename the VHDs without affecting what's in the SR.
NOTE
====
The staging area is created as a subdirectory within the SR in order to
guarantee that it resides within the same filesystem and therefore permit
hard-linking and cheap file moves.
"""
staging_path = tempfile.mkdtemp(dir=sr_path)
return staging_path
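Placing the staging directory inside the SR is what makes the hard links and renames in the surrounding code safe: os.link and os.rename only work within one filesystem and raise OSError(errno.EXDEV) across filesystems. A runnable sketch of the safe case (temp directories stand in for the SR; filenames invented):

import os
import tempfile

sr_path = tempfile.mkdtemp()              # pretend SR mountpoint
staging = tempfile.mkdtemp(dir=sr_path)   # same filesystem by construction

src = os.path.join(sr_path, 'aaaa.vhd')
open(src, 'w').close()
os.link(src, os.path.join(staging, 'image.vhd'))  # hard link: same fs only
os.rename(os.path.join(staging, 'image.vhd'),
          os.path.join(sr_path, 'bbbb.vhd'))      # cheap, atomic move
# across filesystems both calls raise OSError(errno.EXDEV), which is why
# staging lives inside the SR rather than under /tmp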
def _cleanup_staging_area(staging_path):
"""Remove staging area directory
On upload, the staging area contains hard-links to the VHDs in the SR;
it's safe to remove the staging-area because the SR will keep the link
count > 0 (so the VHDs in the SR will not be deleted).
"""
shutil.rmtree(staging_path)
def _make_subprocess(cmdline, stdout=False, stderr=False, stdin=False):
"""Make a subprocess according to the given command-line string
"""
kwargs = {}
kwargs['stdout'] = stdout and subprocess.PIPE or None
kwargs['stderr'] = stderr and subprocess.PIPE or None
kwargs['stdin'] = stdin and subprocess.PIPE or None
args = shlex.split(cmdline)
proc = subprocess.Popen(args, **kwargs)
return proc
def _finish_subprocess(proc, cmdline):
"""Ensure that the process returned a zero exit code indicating success
"""
out, err = proc.communicate()
ret = proc.returncode
if ret != 0:
raise Exception("'%(cmdline)s' returned non-zero exit code: "
"retcode=%(ret)i, stderr='%(err)s'" % locals())
return out, err
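Together these two helpers implement the pipe-through pattern used by _download_tarball and _upload_tarball: open only the pipes you need, feed or drain the process incrementally, then insist on a zero exit code. A compressed restatement with a toy pipeline (`cat` stands in for the real tar command):

import shlex
import subprocess

def make_subprocess(cmdline, stdout=False, stderr=False, stdin=False):
    pipe = subprocess.PIPE
    return subprocess.Popen(shlex.split(cmdline),
                            stdout=pipe if stdout else None,
                            stderr=pipe if stderr else None,
                            stdin=pipe if stdin else None)

def finish_subprocess(proc, cmdline):
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise Exception("'%s' returned %d: %s"
                        % (cmdline, proc.returncode, err))
    return out, err

cmd = 'cat'   # stand-in for "tar -zx --directory=..."
proc = make_subprocess(cmd, stdout=True, stderr=True, stdin=True)
for chunk in ('pretend ', 'tarball ', 'bytes'):
    proc.stdin.write(chunk)   # fed chunk-by-chunk, like the HTTP loop
proc.stdin.close()
out, err = finish_subprocess(proc, cmd)
assert out == 'pretend tarball bytes'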
def download_vhd(session, args):
"""Download an image from Glance, unbundle it, and then deposit the VHDs
into the storage repository
"""
params = pickle.loads(exists(args, 'params'))
image_id = params["image_id"]
glance_host = params["glance_host"]
glance_port = params["glance_port"]
uuid_stack = params["uuid_stack"]
sr_path = params["sr_path"]
staging_path = _make_staging_area(sr_path)
try:
_download_tarball(sr_path, staging_path, image_id, glance_host,
glance_port)
vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack)
return vdi_uuid
finally:
_cleanup_staging_area(staging_path)
def upload_vhd(session, args):
"""Bundle the VHDs comprising an image and then stream them into Glance.
"""
params = pickle.loads(exists(args, 'params'))
vdi_uuids = params["vdi_uuids"]
image_id = params["image_id"]
glance_host = params["glance_host"]
glance_port = params["glance_port"]
sr_path = params["sr_path"]
sr_path = get_sr_path(session)
#FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs
tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id))
tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path]
paths = ["%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids]
tar_cmd.extend(paths)
logging.debug("Bundling image with cmd: %s", tar_cmd)
subprocess.call(tar_cmd)
logging.debug("Writing to test file %s", tmp_file)
put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port)
# FIXME(sirp): return anything useful here?
return ""
def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port):
size = os.path.getsize(tmp_file)
basename = os.path.basename(tmp_file)
bundle = open(tmp_file, 'r')
staging_path = _make_staging_area(sr_path)
try:
headers = {
'x-image-meta-store': 'file',
'x-image-meta-is_public': 'True',
'x-image-meta-type': 'raw',
'x-image-meta-size': size,
'content-length': size,
'content-type': 'application/octet-stream',
}
conn = httplib.HTTPConnection(glance_host, glance_port)
#NOTE(sirp): httplib under python2.4 won't accept a file-like object
# to request
conn.putrequest('PUT', '/images/%s' % image_id)
for header, value in headers.iteritems():
conn.putheader(header, value)
conn.endheaders()
chunk = bundle.read(CHUNK_SIZE)
while chunk:
conn.send(chunk)
chunk = bundle.read(CHUNK_SIZE)
res = conn.getresponse()
#FIXME(sirp): should this be 201 Created?
if res.status != httplib.OK:
raise Exception("Unexpected response from Glance %i" % res.status)
_prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids)
_upload_tarball(staging_path, image_id, glance_host, glance_port)
finally:
bundle.close()
_cleanup_staging_area(staging_path)
return "" # Nothing useful to return on an upload
def get_sr_path(session):
sr_ref = find_sr(session)
if sr_ref is None:
raise Exception('Cannot find SR to read VDI from')
sr_rec = session.xenapi.SR.get_record(sr_ref)
sr_uuid = sr_rec["uuid"]
sr_path = os.path.join(FILE_SR_PATH, sr_uuid)
return sr_path
def copy_kernel_vdi(session, args):
vdi = exists(args, 'vdi-ref')
size = exists(args, 'image-size')
#Use the uuid as a filename
vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
filename = with_vdi_in_dom0(session, vdi, False,
lambda dev:
_copy_kernel_vdi('/dev/%s' % dev, copy_args))
return filename
#TODO(sirp): both objectstore and glance need this; should it be refactored
#into a common lib?
def find_sr(session):
host = get_this_host(session)
srs = session.xenapi.SR.get_all()
for sr in srs:
sr_rec = session.xenapi.SR.get_record(sr)
if not ('i18n-key' in sr_rec['other_config'] and
sr_rec['other_config']['i18n-key'] == 'local-storage'):
continue
for pbd in sr_rec['PBDs']:
pbd_rec = session.xenapi.PBD.get_record(pbd)
if pbd_rec['host'] == host:
return sr
return None
def remove_kernel_ramdisk(session, args):
"""Removes kernel and/or ramdisk from dom0's file system"""
kernel_file = exists(args, 'kernel-file')
ramdisk_file = exists(args, 'ramdisk-file')
if kernel_file:
os.remove(kernel_file)
if ramdisk_file:
os.remove(ramdisk_file)
return "ok"
if __name__ == '__main__':
XenAPIPlugin.dispatch({'put_vdis': put_vdis,
XenAPIPlugin.dispatch({'upload_vhd': upload_vhd,
'download_vhd': download_vhd,
'copy_kernel_vdi': copy_kernel_vdi,
'remove_kernel_ramdisk': remove_kernel_ramdisk})

View File

@@ -84,7 +84,7 @@ fi
if [ -z "$noseargs" ];
then
srcfiles=`find bin -type f ! -name "nova.conf*"`
srcfiles+=" nova setup.py"
srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance"
run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} || exit 1
else
run_tests