From 41e4e18a0324593c0076c3936d63bb6dcca487cb Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Mon, 14 Feb 2011 23:12:34 +0000 Subject: [PATCH 01/39] First cut on XenServer unified-images --- nova/api/openstack/servers.py | 6 +- nova/virt/xenapi/vm_utils.py | 47 +++- .../xenapi/etc/xapi.d/plugins/glance | 210 ++++++++++++++---- 3 files changed, 220 insertions(+), 43 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 17c5519a1a91..1c9dcd9a6049 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -140,7 +140,11 @@ class Controller(wsgi.Controller): image_id = str(image_id) image = self._image_service.show(req.environ['nova.context'], image_id) - return lookup('kernel_id'), lookup('ramdisk_id') + disk_format = image['properties'].get('disk_format', None) + if disk_format == "vhd": + return None, None + else: + return lookup('kernel_id'), lookup('ramdisk_id') def create(self, req): """ Creates a new server for a given user """ diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4bbd522c1735..6d6067da5d02 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -266,7 +266,9 @@ class VMHelper(HelperBase): session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) #TODO(sirp): we need to assert only one parent, not parents two deep - return template_vm_ref, [template_vdi_uuid, parent_uuid] + template_vdi_uuids = {'image': parent_uuid, + 'snap': template_vdi_uuid} + return template_vm_ref, template_vdi_uuids @classmethod def upload_image(cls, session, instance_id, vdi_uuids, image_id): @@ -303,15 +305,36 @@ class VMHelper(HelperBase): return cls._fetch_image_objectstore(session, instance_id, image, access, user.secret, type) + @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, type): + def _fetch_image_glance_vhd(cls, session, instance_id, image, access, type): + LOG.debug(_("Asking xapi to fetch vhd image %(image)s") + % locals()) + + sr_ref = find_sr(session) + if sr_ref is None: + raise exception.NotFound('Cannot find SR to write VDI to') + + params = {'image_id': image, + 'glance_host': FLAGS.glance_host, + 'glance_port': FLAGS.glance_port} + + kwargs = {'params': pickle.dumps(params)} + task = session.async_call_plugin('glance', 'get_vdi', kwargs) + vdi_uuid = session.wait_for_task(instance_id, task) + scan_sr(session, instance_id, sr_ref) + LOG.debug(_("Xapi 'get_vdi' returned VDI UUID %(vdi_uuid)s") % locals()) + return vdi_uuid + + @classmethod + def _fetch_image_glance_disk(cls, session, instance_id, image, access, type): sr = find_sr(session) if sr is None: raise exception.NotFound('Cannot find SR to write VDI to') - c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - meta, image_file = c.get_image(image) + meta, image_file = client.get_image(image) virtual_size = int(meta['size']) vdi_size = virtual_size LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals()) @@ -344,6 +367,22 @@ class VMHelper(HelperBase): else: return session.get_xenapi().VDI.get_uuid(vdi) + @classmethod + def _fetch_image_glance(cls, session, instance_id, image, access, type): + client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + meta = client.get_image_meta(image) + properties = meta['properties'] + disk_format = properties.get('disk_format', None) + + # TODO(sirp): When Glance treats disk_format as a first class + # attribute, we should start using 
that rather than an image-property + if disk_format == 'vhd': + return cls._fetch_image_glance_vhd( + session, instance_id, image, access, type) + else: + return cls._fetch_image_glance_disk( + session, instance_id, image, access, type) + @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, secret, type): diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index aadacce57456..8352d7ee65c5 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -29,7 +29,10 @@ import os import os.path import pickle import sha +import shlex +import shutil import subprocess +import tempfile import time import urlparse @@ -66,65 +69,190 @@ def _copy_kernel_vdi(dest,copy_args): data=f.read(vdi_size) of.write(data) f.close() - of.close() + of.close() logging.debug("Done. Filename: %s",filename) - return filename + return filename + + +def execute(cmd): + args = shlex.split(cmd) + proc = subprocess.Popen( + args, stdout=subprocess.PIPE, stdin=subprocess.PIPE) + return proc + + +def get_vdi(session, args): + """ + """ + params = pickle.loads(exists(args, 'params')) + image_id = params["image_id"] + glance_host = params["glance_host"] + glance_port = params["glance_port"] + + def unbundle_xfer(sr_path, staging_path): + """ + + """ + conn = httplib.HTTPConnection(glance_host, glance_port) + conn.request('GET', '/images/%s' % image_id) + resp = conn.getresponse() + if resp.status == httplib.NOT_FOUND: + raise Exception("Image '%s' not found in Glance" % image_id) + elif resp.status != httplib.OK: + raise Exception("Unexpected response from Glance %i" % res.status) + + tar_proc = execute("tar -zx --directory=%(staging_path)s" % locals()) + chunk = resp.read(CHUNK_SIZE) + while chunk: + tar_proc.stdin.write(chunk) + chunk = resp.read(CHUNK_SIZE) + out, err = tar_proc.communicate() + # TODO(sirp): write assert_process_success + ret = tar_proc.returncode + if ret != 0: + raise Exception( + "tar returned non-zero exit code (%i): '%s'" % (ret, err)) + conn.close() + + def fixup_vhds(sr_path, staging_path): + """ + """ + def rename_with_uuid(orig_path): + """Generate a uuid and rename the file with the uuid""" + orig_dirname = os.path.dirname(orig_path) + uuid = generate_uuid() + new_path = os.path.join(orig_dirname, "%s.vhd" % uuid) + os.rename(orig_path, new_path) + return new_path, uuid + + def move_into_sr(orig_path): + """Move a file into the SR""" + filename = os.path.basename(orig_path) + new_path = os.path.join(sr_path, filename) + #os.rename(orig_path, new_path) + # FIXME(sirp) : testing + shutil.copyfile(orig_path, new_path) + return new_path + + def link_vhds(child_path, parent_path): + proc = execute("vhd-util modify -n %(child_path)s -p %(parent_path)s" + % locals()) + out, err = proc.communicate() + if proc.returncode != 0: + raise Exception("Failed to link vhds: '%s'" % err) + + image_path = os.path.join(staging_path, 'image') + + orig_base_copy_path = os.path.join(image_path, 'image.vhd') + if not os.path.exists(orig_base_copy_path): + raise Exception("Invalid image: image.vhd not present") + + base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path) + + vdi_uuid = base_copy_uuid + orig_snap_path = os.path.join(image_path, 'snap.vhd') + if os.path.exists(orig_snap_path): + snap_path, snap_uuid = rename_with_uuid(orig_snap_path) + vdi_uuid = snap_uuid + # NOTE(sirp): this step is necessary so that an SR scan won't + # delete the 
base_copy out from under us (since it would be + # orphaned) + link_vhds(snap_path, base_copy_path) + move_into_sr(snap_path) + else: + raise Exception("path '%s' not found!!!" % orig_snap_path) + move_into_sr(base_copy_path) + return vdi_uuid + + sr_path = get_sr_path(session) + staging_path = make_staging_area(sr_path) + try: + unbundle_xfer(sr_path, staging_path) + vdi_uuid = fixup_vhds(sr_path, staging_path) + return vdi_uuid + finally: + # FIXME(sirp) : testing + pass + #cleanup_staging_area(staging_path) + def put_vdis(session, args): + """ + """ params = pickle.loads(exists(args, 'params')) vdi_uuids = params["vdi_uuids"] image_id = params["image_id"] glance_host = params["glance_host"] glance_port = params["glance_port"] - - sr_path = get_sr_path(session) - #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs - tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id)) - tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path] - paths = [ "%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids ] - tar_cmd.extend(paths) - logging.debug("Bundling image with cmd: %s", tar_cmd) - subprocess.call(tar_cmd) - logging.debug("Writing to test file %s", tmp_file) - put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port) - return "" # FIXME(sirp): return anything useful here? + def prepare_staging_area(sr_path, staging_path): + """ + Explain preparing staging area here... + """ + image_path = os.path.join(staging_path, 'image') + os.mkdir(image_path) + for name, uuid in vdi_uuids.items(): + source = os.path.join(sr_path, "%s.vhd" % uuid) + link_name = os.path.join(image_path, "%s.vhd" % name) + os.link(source, link_name) -def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port): - size = os.path.getsize(tmp_file) - basename = os.path.basename(tmp_file) - - bundle = open(tmp_file, 'r') - try: - headers = { - 'x-image-meta-store': 'file', - 'x-image-meta-is_public': 'True', - 'x-image-meta-type': 'raw', - 'x-image-meta-size': size, - 'content-length': size, - 'content-type': 'application/octet-stream', - } + def bundle_xfer(staging_path): conn = httplib.HTTPConnection(glance_host, glance_port) #NOTE(sirp): httplib under python2.4 won't accept a file-like object # to request conn.putrequest('PUT', '/images/%s' % image_id) + headers = { + 'x-image-meta-store': 'file', + 'x-image-meta-is_public': 'True', + 'x-image-meta-type': 'raw', + 'x-image-meta-property-disk-format': 'vhd', + 'x-image-meta-property-container-format': 'tarball', + 'transfer-encoding': "chunked", + 'content-type': 'application/octet-stream', + } for header, value in headers.iteritems(): conn.putheader(header, value) conn.endheaders() - - chunk = bundle.read(CHUNK_SIZE) - while chunk: - conn.send(chunk) - chunk = bundle.read(CHUNK_SIZE) - - res = conn.getresponse() + tar_proc = execute("tar -zc --directory=%(staging_path)s ." % locals()) + + chunk = tar_proc.stdout.read(CHUNK_SIZE) + while chunk: + conn.send("%x\r\n%s\r\n" % (len(chunk), chunk)) + chunk = tar_proc.stdout.read(CHUNK_SIZE) + conn.send("0\r\n\r\n") + + resp = conn.getresponse() #FIXME(sirp): should this be 201 Created? 
- if res.status != httplib.OK: + if resp.status != httplib.OK: raise Exception("Unexpected response from Glance %i" % res.status) + + conn.close() + + sr_path = get_sr_path(session) + staging_path = make_staging_area(sr_path) + try: + prepare_staging_area(sr_path, staging_path) + bundle_xfer(staging_path) finally: - bundle.close() + cleanup_staging_area(staging_path) + + return "" # FIXME(sirp): return anything useful here? + + +def make_staging_area(sr_path): + """ + Explain staging area here... + """ + # NOTE(sirp): staging area is created in SR to allow hard-linking + staging_path = tempfile.mkdtemp(dir=sr_path) + return staging_path + + +def cleanup_staging_area(staging_path): + shutil.rmtree(staging_path) + def get_sr_path(session): sr_ref = find_sr(session) @@ -154,7 +282,13 @@ def find_sr(session): return sr return None +def generate_uuid(): + # NOTE(sirp): Python2.4 does not include the uuid module + proc = execute("uuidgen") + uuid = proc.stdout.read().strip() + return uuid if __name__ == '__main__': - XenAPIPlugin.dispatch({'put_vdis': put_vdis, + XenAPIPlugin.dispatch({'put_vdis': put_vdis, + 'get_vdi': get_vdi, 'copy_kernel_vdi': copy_kernel_vdi}) From 238ae2b5a8c559acc362a3b44160404771f1259f Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 00:00:56 +0000 Subject: [PATCH 02/39] Removing testing statements --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 8352d7ee65c5..0cb3b52db39d 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -129,9 +129,7 @@ def get_vdi(session, args): """Move a file into the SR""" filename = os.path.basename(orig_path) new_path = os.path.join(sr_path, filename) - #os.rename(orig_path, new_path) - # FIXME(sirp) : testing - shutil.copyfile(orig_path, new_path) + os.rename(orig_path, new_path) return new_path def link_vhds(child_path, parent_path): @@ -159,8 +157,7 @@ def get_vdi(session, args): # orphaned) link_vhds(snap_path, base_copy_path) move_into_sr(snap_path) - else: - raise Exception("path '%s' not found!!!" 
% orig_snap_path) + move_into_sr(base_copy_path) return vdi_uuid @@ -171,9 +168,7 @@ def get_vdi(session, args): vdi_uuid = fixup_vhds(sr_path, staging_path) return vdi_uuid finally: - # FIXME(sirp) : testing - pass - #cleanup_staging_area(staging_path) + cleanup_staging_area(staging_path) def put_vdis(session, args): From fe6efb38ee30c5a3e532cd19faef0fec063b7446 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 01:37:54 +0000 Subject: [PATCH 03/39] Adding DISK_VHD to ImageTypes --- nova/virt/xenapi/vm_utils.py | 38 ++++++++++++++++++++++++++++-------- nova/virt/xenapi/vmops.py | 22 ++++++++++++++------- 2 files changed, 45 insertions(+), 15 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 6d6067da5d02..a6312d00c81b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -63,11 +63,14 @@ class ImageType: 0 - kernel/ramdisk image (goes on dom0's filesystem) 1 - disk image (local SR, partitioned by objectstore plugin) 2 - raw disk image (local SR, NOT partitioned by plugin) + 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for + linux, HVM assumed for Windows) """ KERNEL_RAMDISK = 0 DISK = 1 DISK_RAW = 2 + DISK_VHD = 3 class VMHelper(HelperBase): @@ -368,15 +371,34 @@ class VMHelper(HelperBase): return session.get_xenapi().VDI.get_uuid(vdi) @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, type): - client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - meta = client.get_image_meta(image) - properties = meta['properties'] - disk_format = properties.get('disk_format', None) + def determine_disk_image_type(cls, instance): + instance_id = instance.id + if instance.kernel_id: + #if kernel is not present we must download a raw disk + LOG.debug(_("Instance %(instance_id)s will use DISK format") % + locals()) + return ImageType.DISK - # TODO(sirp): When Glance treats disk_format as a first class - # attribute, we should start using that rather than an image-property - if disk_format == 'vhd': + if FLAGS.xenapi_image_service == 'glance': + # if using glance, then we could be VHD format + client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + meta = client.get_image_meta(instance.image_id) + properties = meta['properties'] + disk_format = properties.get('disk_format', None) + # TODO(sirp): When Glance treats disk_format as a first class + # attribute, we should start using that rather than an image-property + if disk_format == 'vhd': + LOG.debug(_("Instance %(instance_id)s will use DISK_VHD format") % + locals()) + return ImageType.DISK_VHD + + LOG.debug(_("Instance %(instance_id)s will use DISK_RAW format") % + locals()) + return ImageType.DISK_RAW + + @classmethod + def _fetch_image_glance(cls, session, instance_id, image, access, type): + if type == ImageType.DISK_VHD: return cls._fetch_image_glance_vhd( session, instance_id, image, access, type) else: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index e84ce20c4964..34ceea358708 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -74,27 +74,34 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - #if kernel is not present we must download a raw disk - if instance.kernel_id: - disk_image_type = ImageType.DISK - else: - disk_image_type = ImageType.DISK_RAW + + disk_image_type = VMHelper.determine_disk_image_type(instance) + vdi_uuid = VMHelper.fetch_image(self._session, 
instance.id, instance.image_id, user, project, disk_image_type) + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - #Have a look at the VDI and see if it has a PV kernel + pv_kernel = False - if not instance.kernel_id: + if disk_image_type == ImageType.DISK_RAW: + #Have a look at the VDI and see if it has a PV kernel pv_kernel = VMHelper.lookup_image(self._session, instance.id, vdi_ref) + elif disk_image_type == ImageType.DISK_VHD: + # TODO(sirp): Assuming PV for now; this will need to be + # configurable as Windows will use HVM. + pv_kernel = True + kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) + ramdisk = None if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(self._session, instance.id, instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) + vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, pv_kernel) VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) @@ -102,6 +109,7 @@ class VMOps(object): if network_ref: VMHelper.create_vif(self._session, vm_ref, network_ref, instance.mac_address) + LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) instance_name = instance.name From 15cdeef7820aacd0b1ff95da48816cba9f2544ba Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 18:01:13 +0000 Subject: [PATCH 04/39] Adding more documentation, code-cleanup --- nova/virt/xenapi/vm_utils.py | 54 +++++++++++++---- .../xenapi/etc/xapi.d/plugins/glance | 60 +++++++++---------- 2 files changed, 71 insertions(+), 43 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index a6312d00c81b..5eab2a38ffb4 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -24,6 +24,7 @@ import pickle import re import time import urllib +import uuid from xml.dom import minidom from eventlet import event @@ -318,9 +319,15 @@ class VMHelper(HelperBase): if sr_ref is None: raise exception.NotFound('Cannot find SR to write VDI to') + # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not + # have the `uuid` module. To work around this, we generate the uuids + # here (under Python 2.6+) and pass them as arguments + uuid_stack = [str(uuid.uuid4()) for i in xrange(2)] + params = {'image_id': image, 'glance_host': FLAGS.glance_host, - 'glance_port': FLAGS.glance_port} + 'glance_port': FLAGS.glance_port, + 'uuid_stack': uuid_stack} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'get_vdi', kwargs) @@ -372,14 +379,35 @@ class VMHelper(HelperBase): @classmethod def determine_disk_image_type(cls, instance): - instance_id = instance.id - if instance.kernel_id: - #if kernel is not present we must download a raw disk - LOG.debug(_("Instance %(instance_id)s will use DISK format") % - locals()) - return ImageType.DISK + """Disk Image Types are used to determine where the kernel will reside + within an image. To figure out which type we're dealing with, we use + the following rules: - if FLAGS.xenapi_image_service == 'glance': + 1. If the instance is specifying a kernel explicitly, we must be using + a 'disk' image (kernel outside of the image) + + 2. 
If the kernel isn't specified, then we have two different + scenarios: + + a) If the image is in Glance, then we can use the 'disk_format' + property to determine if the image is really a VHD-style image + or if it's a RAW image + + b) If the image is not in Glance, then it must be a RAW image + (since we don't have a way of identifying VHD images...yet) + """ + def log_disk_format(disk_format): + disk_format = disk_format.upper() + image_id = instance.image_id + instance_id = instance.id + LOG.debug(_("Detected %(disk_format)s format for image " + "%(image_id)s, instance %(instance_id)s") % locals()) + + if instance.kernel_id: + # 1. DISK + log_disk_format('disk') + return ImageType.DISK + elif FLAGS.xenapi_image_service == 'glance': # if using glance, then we could be VHD format client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) meta = client.get_image_meta(instance.image_id) @@ -388,12 +416,12 @@ class VMHelper(HelperBase): # TODO(sirp): When Glance treats disk_format as a first class # attribute, we should start using that rather than an image-property if disk_format == 'vhd': - LOG.debug(_("Instance %(instance_id)s will use DISK_VHD format") % - locals()) + # 2a. DISK_VHD + log_disk_format('disk_vhd') return ImageType.DISK_VHD - - LOG.debug(_("Instance %(instance_id)s will use DISK_RAW format") % - locals()) + + # 2b. DISK_RAW + log_disk_format('disk_raw') return ImageType.DISK_RAW @classmethod diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 0cb3b52db39d..2018dca5f481 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -21,20 +21,14 @@ # XenAPI plugin for managing glance images # -import base64 -import errno -import hmac import httplib import os import os.path import pickle -import sha import shlex import shutil import subprocess import tempfile -import time -import urlparse import XenAPIPlugin @@ -74,11 +68,14 @@ def _copy_kernel_vdi(dest,copy_args): return filename -def execute(cmd): - args = shlex.split(cmd) - proc = subprocess.Popen( - args, stdout=subprocess.PIPE, stdin=subprocess.PIPE) - return proc +def assert_process_success(proc, cmd): + """Ensure that the process returned a zero exit code indicating success + """ + out, err = proc.communicate() + ret = proc.returncode + if ret != 0: + msg = "%(cmd)s returned non-zero exit code (%i): '%s'" % (ret, err) + raise Exception(msg) def get_vdi(session, args): @@ -88,6 +85,7 @@ def get_vdi(session, args): image_id = params["image_id"] glance_host = params["glance_host"] glance_port = params["glance_port"] + uuid_stack = params["uuid_stack"] def unbundle_xfer(sr_path, staging_path): """ @@ -101,17 +99,17 @@ def get_vdi(session, args): elif resp.status != httplib.OK: raise Exception("Unexpected response from Glance %i" % res.status) - tar_proc = execute("tar -zx --directory=%(staging_path)s" % locals()) + tar_args = shlex.split( + "tar -zx --directory=%(staging_path)s" % locals()) + tar_proc = subprocess.Popen( + tar_args, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + chunk = resp.read(CHUNK_SIZE) while chunk: tar_proc.stdin.write(chunk) chunk = resp.read(CHUNK_SIZE) - out, err = tar_proc.communicate() - # TODO(sirp): write assert_process_success - ret = tar_proc.returncode - if ret != 0: - raise Exception( - "tar returned non-zero exit code (%i): '%s'" % (ret, err)) + + assert_process_success(tar_proc, "tar") conn.close() def fixup_vhds(sr_path, staging_path): @@ -120,7 
+118,7 @@ def get_vdi(session, args): def rename_with_uuid(orig_path): """Generate a uuid and rename the file with the uuid""" orig_dirname = os.path.dirname(orig_path) - uuid = generate_uuid() + uuid = uuid_stack.pop() new_path = os.path.join(orig_dirname, "%s.vhd" % uuid) os.rename(orig_path, new_path) return new_path, uuid @@ -133,11 +131,13 @@ def get_vdi(session, args): return new_path def link_vhds(child_path, parent_path): - proc = execute("vhd-util modify -n %(child_path)s -p %(parent_path)s" - % locals()) - out, err = proc.communicate() - if proc.returncode != 0: - raise Exception("Failed to link vhds: '%s'" % err) + modify_args = shlex.split( + "vhd-util modify -n %(child_path)s -p %(parent_path)s" + % locals()) + modify_proc = subprocess.Popen( + modify_args, stderr=subprocess.PIPE) + assert_process_success(modify_proc, "vhd-util") + image_path = os.path.join(staging_path, 'image') @@ -210,7 +210,10 @@ def put_vdis(session, args): conn.putheader(header, value) conn.endheaders() - tar_proc = execute("tar -zc --directory=%(staging_path)s ." % locals()) + tar_args = shlex.split( + "tar -zc --directory=%(staging_path)s ." % locals()) + tar_proc = subprocess.Popen( + tar_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) chunk = tar_proc.stdout.read(CHUNK_SIZE) while chunk: @@ -218,6 +221,8 @@ def put_vdis(session, args): chunk = tar_proc.stdout.read(CHUNK_SIZE) conn.send("0\r\n\r\n") + assert_process_success(tar_proc, "tar") + resp = conn.getresponse() #FIXME(sirp): should this be 201 Created? if resp.status != httplib.OK: @@ -277,11 +282,6 @@ def find_sr(session): return sr return None -def generate_uuid(): - # NOTE(sirp): Python2.4 does not include the uuid module - proc = execute("uuidgen") - uuid = proc.stdout.read().strip() - return uuid if __name__ == '__main__': XenAPIPlugin.dispatch({'put_vdis': put_vdis, From 0fc3a184230c479254b9f713ea61de2f24f680ab Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 18:23:14 +0000 Subject: [PATCH 05/39] Moving SR path code outside of glance plugin --- nova/virt/xenapi/vm_utils.py | 21 ++++++++++-- nova/virt/xenapi_conn.py | 2 ++ .../xenapi/etc/xapi.d/plugins/glance | 34 ++----------------- 3 files changed, 23 insertions(+), 34 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 5eab2a38ffb4..e73bc7f2b6f9 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -285,7 +285,8 @@ class VMHelper(HelperBase): params = {'vdi_uuids': vdi_uuids, 'image_id': image_id, 'glance_host': FLAGS.glance_host, - 'glance_port': FLAGS.glance_port} + 'glance_port': FLAGS.glance_port, + 'sr_path': get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'put_vdis', kwargs) @@ -327,7 +328,8 @@ class VMHelper(HelperBase): params = {'image_id': image, 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port, - 'uuid_stack': uuid_stack} + 'uuid_stack': uuid_stack, + 'sr_path': get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'get_vdi', kwargs) @@ -685,6 +687,21 @@ def find_sr(session): return None +def get_sr_path(session): + """Return the path to our Storage Repository + + This is used when we're dealing with VHDs directly, either by taking + snapshots or by restoring an image in the DISK_VHD format. 
+ """ + # TODO(sirp): add safe_find_sr + sr_ref = find_sr(session) + if sr_ref is None: + raise Exception('Cannot find SR to read VDI from') + sr_rec = session.get_xenapi().SR.get_record(sr_ref) + sr_uuid = sr_rec["uuid"] + return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) + + def remap_vbd_dev(dev): """Return the appropriate location for a plugged-in VBD device diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index a0b0499b8715..8ae5684b0bcf 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -100,6 +100,8 @@ flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts', 5, 'Max number of times to poll for VHD to coalesce.' ' Used only if connection_type=xenapi.') +flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount', + 'Base path to the storage repository') flags.DEFINE_string('target_host', None, 'iSCSI Target Host') diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 2018dca5f481..0b270f5f9584 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -38,7 +38,6 @@ configure_logging('glance') CHUNK_SIZE = 8192 KERNEL_DIR = '/boot/guest' -FILE_SR_PATH = '/var/run/sr-mount' def copy_kernel_vdi(session,args): vdi = exists(args, 'vdi-ref') @@ -86,6 +85,7 @@ def get_vdi(session, args): glance_host = params["glance_host"] glance_port = params["glance_port"] uuid_stack = params["uuid_stack"] + sr_path = params["sr_path"] def unbundle_xfer(sr_path, staging_path): """ @@ -161,7 +161,6 @@ def get_vdi(session, args): move_into_sr(base_copy_path) return vdi_uuid - sr_path = get_sr_path(session) staging_path = make_staging_area(sr_path) try: unbundle_xfer(sr_path, staging_path) @@ -179,6 +178,7 @@ def put_vdis(session, args): image_id = params["image_id"] glance_host = params["glance_host"] glance_port = params["glance_port"] + sr_path = params["sr_path"] def prepare_staging_area(sr_path, staging_path): """ @@ -230,7 +230,6 @@ def put_vdis(session, args): conn.close() - sr_path = get_sr_path(session) staging_path = make_staging_area(sr_path) try: prepare_staging_area(sr_path, staging_path) @@ -254,35 +253,6 @@ def cleanup_staging_area(staging_path): shutil.rmtree(staging_path) -def get_sr_path(session): - sr_ref = find_sr(session) - - if sr_ref is None: - raise Exception('Cannot find SR to read VDI from') - - sr_rec = session.xenapi.SR.get_record(sr_ref) - sr_uuid = sr_rec["uuid"] - sr_path = os.path.join(FILE_SR_PATH, sr_uuid) - return sr_path - - -#TODO(sirp): both objectstore and glance need this, should this be refactored -#into common lib -def find_sr(session): - host = get_this_host(session) - srs = session.xenapi.SR.get_all() - for sr in srs: - sr_rec = session.xenapi.SR.get_record(sr) - if not ('i18n-key' in sr_rec['other_config'] and - sr_rec['other_config']['i18n-key'] == 'local-storage'): - continue - for pbd in sr_rec['PBDs']: - pbd_rec = session.xenapi.PBD.get_record(pbd) - if pbd_rec['host'] == host: - return sr - return None - - if __name__ == '__main__': XenAPIPlugin.dispatch({'put_vdis': put_vdis, 'get_vdi': get_vdi, From b4b1a7fbd55784157b3084016d4dfe2bd0120e51 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 18:36:17 +0000 Subject: [PATCH 06/39] Adding safe_find_sr --- nova/virt/xenapi/vm_utils.py | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index e73bc7f2b6f9..37ce868852de 
100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -316,9 +316,7 @@ class VMHelper(HelperBase): LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals()) - sr_ref = find_sr(session) - if sr_ref is None: - raise exception.NotFound('Cannot find SR to write VDI to') + sr_ref = safe_find_sr(session) # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not # have the `uuid` module. To work around this, we generate the uuids @@ -340,16 +338,14 @@ class VMHelper(HelperBase): @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, type): - sr = find_sr(session) - if sr is None: - raise exception.NotFound('Cannot find SR to write VDI to') + sr_ref = safe_find_sr(session) client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - meta, image_file = client.get_image(image) virtual_size = int(meta['size']) vdi_size = virtual_size LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals()) + if type == ImageType.DISK: # Make room for MBR. vdi_size += MBR_SIZE_BYTES @@ -672,7 +668,18 @@ def get_vdi_for_vm_safely(session, vm_ref): return vdi_ref, vdi_rec +def safe_find_sr(session): + """Same as find_sr except raises a NotFound exception if SR cannot be + determined + """ + sr_ref = find_sr(session) + if sr_ref is None: + raise exception.NotFound(_('Cannot find SR to read/write VDI')) + return sr_ref + + def find_sr(session): + """Return the storage repository to hold VM images""" host = session.get_xenapi_host() srs = session.get_xenapi().SR.get_all() for sr in srs: @@ -688,15 +695,12 @@ def find_sr(session): def get_sr_path(session): - """Return the path to our Storage Repository + """Return the path to our storage repository This is used when we're dealing with VHDs directly, either by taking snapshots or by restoring an image in the DISK_VHD format. 
""" - # TODO(sirp): add safe_find_sr - sr_ref = find_sr(session) - if sr_ref is None: - raise Exception('Cannot find SR to read VDI from') + sr_ref = safe_find_sr(session) sr_rec = session.get_xenapi().SR.get_record(sr_ref) sr_uuid = sr_rec["uuid"] return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) From eb603b5ec3d54b2b6c893f8d41e7d12bbaa49e57 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 18:50:40 +0000 Subject: [PATCH 07/39] Refactoring put_vdis --- .../xenapi/etc/xapi.d/plugins/glance | 144 +++++++++--------- 1 file changed, 73 insertions(+), 71 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 0b270f5f9584..3fe2d4059c3f 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -67,16 +67,6 @@ def _copy_kernel_vdi(dest,copy_args): return filename -def assert_process_success(proc, cmd): - """Ensure that the process returned a zero exit code indicating success - """ - out, err = proc.communicate() - ret = proc.returncode - if ret != 0: - msg = "%(cmd)s returned non-zero exit code (%i): '%s'" % (ret, err) - raise Exception(msg) - - def get_vdi(session, args): """ """ @@ -109,7 +99,7 @@ def get_vdi(session, args): tar_proc.stdin.write(chunk) chunk = resp.read(CHUNK_SIZE) - assert_process_success(tar_proc, "tar") + _assert_process_success(tar_proc, "tar") conn.close() def fixup_vhds(sr_path, staging_path): @@ -136,7 +126,7 @@ def get_vdi(session, args): % locals()) modify_proc = subprocess.Popen( modify_args, stderr=subprocess.PIPE) - assert_process_success(modify_proc, "vhd-util") + _assert_process_success(modify_proc, "vhd-util") image_path = os.path.join(staging_path, 'image') @@ -161,13 +151,13 @@ def get_vdi(session, args): move_into_sr(base_copy_path) return vdi_uuid - staging_path = make_staging_area(sr_path) + staging_path = _make_staging_area(sr_path) try: unbundle_xfer(sr_path, staging_path) vdi_uuid = fixup_vhds(sr_path, staging_path) return vdi_uuid finally: - cleanup_staging_area(staging_path) + _cleanup_staging_area(staging_path) def put_vdis(session, args): @@ -180,67 +170,69 @@ def put_vdis(session, args): glance_port = params["glance_port"] sr_path = params["sr_path"] - def prepare_staging_area(sr_path, staging_path): - """ - Explain preparing staging area here... - """ - image_path = os.path.join(staging_path, 'image') - os.mkdir(image_path) - for name, uuid in vdi_uuids.items(): - source = os.path.join(sr_path, "%s.vhd" % uuid) - link_name = os.path.join(image_path, "%s.vhd" % name) - os.link(source, link_name) - - def bundle_xfer(staging_path): - conn = httplib.HTTPConnection(glance_host, glance_port) - #NOTE(sirp): httplib under python2.4 won't accept a file-like object - # to request - conn.putrequest('PUT', '/images/%s' % image_id) - - headers = { - 'x-image-meta-store': 'file', - 'x-image-meta-is_public': 'True', - 'x-image-meta-type': 'raw', - 'x-image-meta-property-disk-format': 'vhd', - 'x-image-meta-property-container-format': 'tarball', - 'transfer-encoding': "chunked", - 'content-type': 'application/octet-stream', - } - for header, value in headers.iteritems(): - conn.putheader(header, value) - conn.endheaders() - - tar_args = shlex.split( - "tar -zc --directory=%(staging_path)s ." 
% locals()) - tar_proc = subprocess.Popen( - tar_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - - chunk = tar_proc.stdout.read(CHUNK_SIZE) - while chunk: - conn.send("%x\r\n%s\r\n" % (len(chunk), chunk)) - chunk = tar_proc.stdout.read(CHUNK_SIZE) - conn.send("0\r\n\r\n") - - assert_process_success(tar_proc, "tar") - - resp = conn.getresponse() - #FIXME(sirp): should this be 201 Created? - if resp.status != httplib.OK: - raise Exception("Unexpected response from Glance %i" % res.status) - - conn.close() - - staging_path = make_staging_area(sr_path) + staging_path = _make_staging_area(sr_path) try: - prepare_staging_area(sr_path, staging_path) - bundle_xfer(staging_path) + _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids) + _bundle_xfer(staging_path, image_id, glance_host, glance_port) finally: - cleanup_staging_area(staging_path) + _cleanup_staging_area(staging_path) - return "" # FIXME(sirp): return anything useful here? + return "" -def make_staging_area(sr_path): +def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): + """ + Explain preparing staging area here... + """ + image_path = os.path.join(staging_path, 'image') + os.mkdir(image_path) + for name, uuid in vdi_uuids.items(): + source = os.path.join(sr_path, "%s.vhd" % uuid) + link_name = os.path.join(image_path, "%s.vhd" % name) + os.link(source, link_name) + + +def _bundle_xfer(staging_path, image_id, glance_host, glance_port): + """ + """ + conn = httplib.HTTPConnection(glance_host, glance_port) + #NOTE(sirp): httplib under python2.4 won't accept a file-like object + # to request + conn.putrequest('PUT', '/images/%s' % image_id) + + headers = { + 'x-image-meta-store': 'file', + 'x-image-meta-is_public': 'True', + 'x-image-meta-type': 'raw', + 'x-image-meta-property-disk-format': 'vhd', + 'x-image-meta-property-container-format': 'tarball', + 'transfer-encoding': "chunked", + 'content-type': 'application/octet-stream', + } + for header, value in headers.iteritems(): + conn.putheader(header, value) + conn.endheaders() + + tar_args = shlex.split( + "tar -zc --directory=%(staging_path)s ." % locals()) + tar_proc = subprocess.Popen( + tar_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + chunk = tar_proc.stdout.read(CHUNK_SIZE) + while chunk: + conn.send("%x\r\n%s\r\n" % (len(chunk), chunk)) + chunk = tar_proc.stdout.read(CHUNK_SIZE) + conn.send("0\r\n\r\n") + + _assert_process_success(tar_proc, "tar") + + resp = conn.getresponse() + if resp.status != httplib.OK: + raise Exception("Unexpected response from Glance %i" % res.status) + conn.close() + + +def _make_staging_area(sr_path): """ Explain staging area here... 
""" @@ -249,10 +241,20 @@ def make_staging_area(sr_path): return staging_path -def cleanup_staging_area(staging_path): +def _cleanup_staging_area(staging_path): shutil.rmtree(staging_path) +def _assert_process_success(proc, cmd): + """Ensure that the process returned a zero exit code indicating success + """ + out, err = proc.communicate() + ret = proc.returncode + if ret != 0: + msg = "%(cmd)s returned non-zero exit code (%i): '%s'" % (ret, err) + raise Exception(msg) + + if __name__ == '__main__': XenAPIPlugin.dispatch({'put_vdis': put_vdis, 'get_vdi': get_vdi, From acf95a640cfeb0812a55577b6a08bff972ad523b Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 19:17:27 +0000 Subject: [PATCH 08/39] Regrouping methods so they make sense --- nova/virt/xenapi/vm_utils.py | 7 +- .../xenapi/etc/xapi.d/plugins/glance | 233 +++++++++--------- 2 files changed, 123 insertions(+), 117 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 37ce868852de..ba3e5e423645 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -289,7 +289,7 @@ class VMHelper(HelperBase): 'sr_path': get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} - task = session.async_call_plugin('glance', 'put_vdis', kwargs) + task = session.async_call_plugin('glance', 'upload_image', kwargs) session.wait_for_task(instance_id, task) @classmethod @@ -310,7 +310,6 @@ class VMHelper(HelperBase): return cls._fetch_image_objectstore(session, instance_id, image, access, user.secret, type) - @classmethod def _fetch_image_glance_vhd(cls, session, instance_id, image, access, type): LOG.debug(_("Asking xapi to fetch vhd image %(image)s") @@ -330,10 +329,10 @@ class VMHelper(HelperBase): 'sr_path': get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} - task = session.async_call_plugin('glance', 'get_vdi', kwargs) + task = session.async_call_plugin('glance', 'download_image', kwargs) vdi_uuid = session.wait_for_task(instance_id, task) scan_sr(session, instance_id, sr_ref) - LOG.debug(_("Xapi 'get_vdi' returned VDI UUID %(vdi_uuid)s") % locals()) + LOG.debug(_("xapi 'download_image' returned VDI UUID %(vdi_uuid)s") % locals()) return vdi_uuid @classmethod diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 3fe2d4059c3f..7bbab4f525be 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -39,17 +39,6 @@ configure_logging('glance') CHUNK_SIZE = 8192 KERNEL_DIR = '/boot/guest' -def copy_kernel_vdi(session,args): - vdi = exists(args, 'vdi-ref') - size = exists(args,'image-size') - #Use the uuid as a filename - vdi_uuid=session.xenapi.VDI.get_uuid(vdi) - copy_args={'vdi_uuid':vdi_uuid,'vdi_size':int(size)} - filename=with_vdi_in_dom0(session, vdi, False, - lambda dev: - _copy_kernel_vdi('/dev/%s' % dev,copy_args)) - return filename - def _copy_kernel_vdi(dest,copy_args): vdi_uuid=copy_args['vdi_uuid'] vdi_size=copy_args['vdi_size'] @@ -67,117 +56,83 @@ def _copy_kernel_vdi(dest,copy_args): return filename -def get_vdi(session, args): +def _download_tarball(sr_path, staging_path, image_id, glance_host, + glance_port): """ + """ - params = pickle.loads(exists(args, 'params')) - image_id = params["image_id"] - glance_host = params["glance_host"] - glance_port = params["glance_port"] - uuid_stack = params["uuid_stack"] - sr_path = params["sr_path"] + conn = httplib.HTTPConnection(glance_host, glance_port) + 
conn.request('GET', '/images/%s' % image_id) + resp = conn.getresponse() + if resp.status == httplib.NOT_FOUND: + raise Exception("Image '%s' not found in Glance" % image_id) + elif resp.status != httplib.OK: + raise Exception("Unexpected response from Glance %i" % res.status) - def unbundle_xfer(sr_path, staging_path): - """ - - """ - conn = httplib.HTTPConnection(glance_host, glance_port) - conn.request('GET', '/images/%s' % image_id) - resp = conn.getresponse() - if resp.status == httplib.NOT_FOUND: - raise Exception("Image '%s' not found in Glance" % image_id) - elif resp.status != httplib.OK: - raise Exception("Unexpected response from Glance %i" % res.status) - - tar_args = shlex.split( - "tar -zx --directory=%(staging_path)s" % locals()) - tar_proc = subprocess.Popen( - tar_args, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + tar_args = shlex.split( + "tar -zx --directory=%(staging_path)s" % locals()) + tar_proc = subprocess.Popen( + tar_args, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + chunk = resp.read(CHUNK_SIZE) + while chunk: + tar_proc.stdin.write(chunk) chunk = resp.read(CHUNK_SIZE) - while chunk: - tar_proc.stdin.write(chunk) - chunk = resp.read(CHUNK_SIZE) - _assert_process_success(tar_proc, "tar") - conn.close() - - def fixup_vhds(sr_path, staging_path): - """ - """ - def rename_with_uuid(orig_path): - """Generate a uuid and rename the file with the uuid""" - orig_dirname = os.path.dirname(orig_path) - uuid = uuid_stack.pop() - new_path = os.path.join(orig_dirname, "%s.vhd" % uuid) - os.rename(orig_path, new_path) - return new_path, uuid - - def move_into_sr(orig_path): - """Move a file into the SR""" - filename = os.path.basename(orig_path) - new_path = os.path.join(sr_path, filename) - os.rename(orig_path, new_path) - return new_path - - def link_vhds(child_path, parent_path): - modify_args = shlex.split( - "vhd-util modify -n %(child_path)s -p %(parent_path)s" - % locals()) - modify_proc = subprocess.Popen( - modify_args, stderr=subprocess.PIPE) - _assert_process_success(modify_proc, "vhd-util") + _assert_process_success(tar_proc, "tar") + conn.close() - image_path = os.path.join(staging_path, 'image') - - orig_base_copy_path = os.path.join(image_path, 'image.vhd') - if not os.path.exists(orig_base_copy_path): - raise Exception("Invalid image: image.vhd not present") - - base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path) - - vdi_uuid = base_copy_uuid - orig_snap_path = os.path.join(image_path, 'snap.vhd') - if os.path.exists(orig_snap_path): - snap_path, snap_uuid = rename_with_uuid(orig_snap_path) - vdi_uuid = snap_uuid - # NOTE(sirp): this step is necessary so that an SR scan won't - # delete the base_copy out from under us (since it would be - # orphaned) - link_vhds(snap_path, base_copy_path) - move_into_sr(snap_path) - - move_into_sr(base_copy_path) - return vdi_uuid - - staging_path = _make_staging_area(sr_path) - try: - unbundle_xfer(sr_path, staging_path) - vdi_uuid = fixup_vhds(sr_path, staging_path) - return vdi_uuid - finally: - _cleanup_staging_area(staging_path) - - -def put_vdis(session, args): +def fixup_vhds(sr_path, staging_path, uuid_stack): """ """ - params = pickle.loads(exists(args, 'params')) - vdi_uuids = params["vdi_uuids"] - image_id = params["image_id"] - glance_host = params["glance_host"] - glance_port = params["glance_port"] - sr_path = params["sr_path"] + def rename_with_uuid(orig_path): + """Generate a uuid and rename the file with the uuid""" + orig_dirname = os.path.dirname(orig_path) + uuid = uuid_stack.pop() 
+ new_path = os.path.join(orig_dirname, "%s.vhd" % uuid) + os.rename(orig_path, new_path) + return new_path, uuid - staging_path = _make_staging_area(sr_path) - try: - _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids) - _bundle_xfer(staging_path, image_id, glance_host, glance_port) - finally: - _cleanup_staging_area(staging_path) - return "" + def link_vhds(child_path, parent_path): + modify_args = shlex.split( + "vhd-util modify -n %(child_path)s -p %(parent_path)s" + % locals()) + modify_proc = subprocess.Popen( + modify_args, stderr=subprocess.PIPE) + _assert_process_success(modify_proc, "vhd-util") + + + def move_into_sr(orig_path): + """Move a file into the SR""" + filename = os.path.basename(orig_path) + new_path = os.path.join(sr_path, filename) + os.rename(orig_path, new_path) + return new_path + + + image_path = os.path.join(staging_path, 'image') + + orig_base_copy_path = os.path.join(image_path, 'image.vhd') + if not os.path.exists(orig_base_copy_path): + raise Exception("Invalid image: image.vhd not present") + + base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path) + + vdi_uuid = base_copy_uuid + orig_snap_path = os.path.join(image_path, 'snap.vhd') + if os.path.exists(orig_snap_path): + snap_path, snap_uuid = rename_with_uuid(orig_snap_path) + vdi_uuid = snap_uuid + # NOTE(sirp): this step is necessary so that an SR scan won't + # delete the base_copy out from under us (since it would be + # orphaned) + link_vhds(snap_path, base_copy_path) + move_into_sr(snap_path) + + move_into_sr(base_copy_path) + return vdi_uuid def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): @@ -192,7 +147,7 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): os.link(source, link_name) -def _bundle_xfer(staging_path, image_id, glance_host, glance_port): +def _upload_tarball(staging_path, image_id, glance_host, glance_port): """ """ conn = httplib.HTTPConnection(glance_host, glance_port) @@ -255,7 +210,59 @@ def _assert_process_success(proc, cmd): raise Exception(msg) +def download_image(session, args): + """ + """ + params = pickle.loads(exists(args, 'params')) + image_id = params["image_id"] + glance_host = params["glance_host"] + glance_port = params["glance_port"] + uuid_stack = params["uuid_stack"] + sr_path = params["sr_path"] + + staging_path = _make_staging_area(sr_path) + try: + _download_tarball(sr_path, staging_path, image_id, glance_host, + glance_port) + vdi_uuid = fixup_vhds(sr_path, staging_path, uuid_stack) + return vdi_uuid + finally: + _cleanup_staging_area(staging_path) + + +def upload_image(session, args): + """ + """ + params = pickle.loads(exists(args, 'params')) + vdi_uuids = params["vdi_uuids"] + image_id = params["image_id"] + glance_host = params["glance_host"] + glance_port = params["glance_port"] + sr_path = params["sr_path"] + + staging_path = _make_staging_area(sr_path) + try: + _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids) + _upload_tarball(staging_path, image_id, glance_host, glance_port) + finally: + _cleanup_staging_area(staging_path) + + return "" + + +def copy_kernel_vdi(session,args): + vdi = exists(args, 'vdi-ref') + size = exists(args,'image-size') + #Use the uuid as a filename + vdi_uuid=session.xenapi.VDI.get_uuid(vdi) + copy_args={'vdi_uuid':vdi_uuid,'vdi_size':int(size)} + filename=with_vdi_in_dom0(session, vdi, False, + lambda dev: + _copy_kernel_vdi('/dev/%s' % dev,copy_args)) + return filename + + if __name__ == '__main__': - XenAPIPlugin.dispatch({'put_vdis': 
put_vdis, - 'get_vdi': get_vdi, + XenAPIPlugin.dispatch({'upload_image': upload_image, + 'download_image': download_image, 'copy_kernel_vdi': copy_kernel_vdi}) From 300657f298fbecf9a08792b6d15e462560a6cdf5 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 19:51:23 +0000 Subject: [PATCH 09/39] Adding documentation --- .../xenapi/etc/xapi.d/plugins/glance | 85 +++++++++++++++++-- 1 file changed, 76 insertions(+), 9 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 7bbab4f525be..dac773ff12cd 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -58,8 +58,8 @@ def _copy_kernel_vdi(dest,copy_args): def _download_tarball(sr_path, staging_path, image_id, glance_host, glance_port): - """ - + """Download the tarball image from Glance and extract it into the staging + area. """ conn = httplib.HTTPConnection(glance_host, glance_port) conn.request('GET', '/images/%s' % image_id) @@ -84,10 +84,34 @@ def _download_tarball(sr_path, staging_path, image_id, glance_host, def fixup_vhds(sr_path, staging_path, uuid_stack): - """ + """Fixup the downloaded VHDs before we move them into the SR. + + We cannot extract VHDs directly into the SR since they don't yet have + UUIDs, aren't properly associated with each other, and would be subject to + a race-condition of one-file being present and the other not being + downloaded yet. + + To avoid these we problems, we use a staging area to fixup the VHDs before + moving them into the SR. The steps involved are: + + 1. Extracting tarball into staging area + + 2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd') + + 3. Linking the two VHDs together + + 4. Pseudo-atomically moving the images into the SR. (It's not really + atomic because it takes place as two os.rename operations; however, the + chances of an SR.scan occuring between the two rename() invocations is so + small that we can safely ignore it) """ def rename_with_uuid(orig_path): - """Generate a uuid and rename the file with the uuid""" + """Rename VHD using UUID so that it will be recognized by SR on a + subsequent scan. + + Since Python2.4 doesn't have the `uuid` module, we pass a stack of + pre-computed UUIDs from the compute worker. + """ orig_dirname = os.path.dirname(orig_path) uuid = uuid_stack.pop() new_path = os.path.join(orig_dirname, "%s.vhd" % uuid) @@ -96,6 +120,11 @@ def fixup_vhds(sr_path, staging_path, uuid_stack): def link_vhds(child_path, parent_path): + """Use vhd-util to associate the snapshot VHD with its base_copy. + + This needs to be done before we move both VHDs into the SR to prevent + the base_copy from being DOA (deleted-on-arrival). + """ modify_args = shlex.split( "vhd-util modify -n %(child_path)s -p %(parent_path)s" % locals()) @@ -136,8 +165,8 @@ def fixup_vhds(sr_path, staging_path, uuid_stack): def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): - """ - Explain preparing staging area here... + """Hard-link VHDs into staging area with appropriate filename + ('snap' or 'image.vhd') """ image_path = os.path.join(staging_path, 'image') os.mkdir(image_path) @@ -149,6 +178,8 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): def _upload_tarball(staging_path, image_id, glance_host, glance_port): """ + Create a tarball of the image and then stream that into Glance + using chunked-transfer-encoded HTTP. 
""" conn = httplib.HTTPConnection(glance_host, glance_port) #NOTE(sirp): httplib under python2.4 won't accept a file-like object @@ -189,7 +220,36 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port): def _make_staging_area(sr_path): """ - Explain staging area here... + The staging area is a place where we can temporarily store and + manipulate VHDs. The use of the staging area is different for upload and + download: + + Download + ======== + + When we download the tarball, the VHDs contained within will have names + like "snap.vhd" and "image.vhd". We need to assign UUIDs to them before + moving them into the SR. However, since 'image.vhd' may be a base_copy, we + need to link it to 'snap.vhd' (using vhd-util modify) before moving both + into the SR (otherwise the SR.scan will cause 'image.vhd' to be deleted). + The staging area gives us a place to perform these operations before they + are moved to the SR, scanned, and then registered with XenServer. + + Upload + ====== + + On upload, we want to rename the VHDs to reflect what they are, 'snap.vhd' + in the case of the snapshot VHD, and 'image.vhd' in the case of the + base_copy. The staging area provides a directory in which we can create + hard-links to rename the VHDs without affecting what's in the SR. + + + NOTE + ==== + + The staging area is created as a subdirectory within the SR in order to + guarantee that it resides within the same filesystem and therefore permit + hard-linking and cheap file moves. """ # NOTE(sirp): staging area is created in SR to allow hard-linking staging_path = tempfile.mkdtemp(dir=sr_path) @@ -197,6 +257,12 @@ def _make_staging_area(sr_path): def _cleanup_staging_area(staging_path): + """Remove staging area directory + + On upload, the staging area contains hard-links to the VHDs in the SR; + it's safe to remove the staging-area because the SR will keep the link + count > 0 (so the VHDs in the SR will not be deleted). + """ shutil.rmtree(staging_path) @@ -211,7 +277,8 @@ def _assert_process_success(proc, cmd): def download_image(session, args): - """ + """Download an image from Glance, unbundle it, and then deposit the VHDs + into the storage repository """ params = pickle.loads(exists(args, 'params')) image_id = params["image_id"] @@ -231,7 +298,7 @@ def download_image(session, args): def upload_image(session, args): - """ + """Bundle the VHDs comprising an image and then stream them into Glance. 
""" params = pickle.loads(exists(args, 'params')) vdi_uuids = params["vdi_uuids"] From 1d72b9d3ddc835d788ba1fec1a937c2788e94b38 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 21:36:13 +0000 Subject: [PATCH 10/39] Using Nova style nokernel --- nova/api/openstack/servers.py | 8 +------- nova/compute/api.py | 2 ++ plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 9 ++++++--- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 1c9dcd9a6049..cd0cea235b26 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -138,13 +138,7 @@ class Controller(wsgi.Controller): _("%(param)s property not found for image %(_image_id)s") % locals()) - image_id = str(image_id) - image = self._image_service.show(req.environ['nova.context'], image_id) - disk_format = image['properties'].get('disk_format', None) - if disk_format == "vhd": - return None, None - else: - return lookup('kernel_id'), lookup('ramdisk_id') + return lookup('kernel_id'), lookup('ramdisk_id') def create(self, req): """ Creates a new server for a given user """ diff --git a/nova/compute/api.py b/nova/compute/api.py index ac02dbcfa882..6d28e376cac4 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -106,6 +106,8 @@ class API(base.Base): kernel_id = image.get('kernelId', None) if ramdisk_id is None: ramdisk_id = image.get('ramdiskId', None) + + # FIXME(sirp): is there a way we can remove null_kernel? # No kernel and ramdisk for raw images if kernel_id == str(FLAGS.null_kernel): kernel_id = None diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index dac773ff12cd..75bdda2c67c6 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -182,15 +182,19 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port): using chunked-transfer-encoded HTTP. """ conn = httplib.HTTPConnection(glance_host, glance_port) - #NOTE(sirp): httplib under python2.4 won't accept a file-like object + # NOTE(sirp): httplib under python2.4 won't accept a file-like object # to request conn.putrequest('PUT', '/images/%s' % image_id) + # FIXME(sirp): nokernel signals Nova to use a raw image. This is defined by + # FLAGS.null_kernel. Is there a way to get rid of this? headers = { 'x-image-meta-store': 'file', 'x-image-meta-is_public': 'True', 'x-image-meta-type': 'raw', 'x-image-meta-property-disk-format': 'vhd', + 'x-image-meta-property-kernel-id': 'nokernel', + 'x-image-meta-property-ramdisk-id': 'noramdisk', 'x-image-meta-property-container-format': 'tarball', 'transfer-encoding': "chunked", 'content-type': 'application/octet-stream', @@ -251,7 +255,6 @@ def _make_staging_area(sr_path): guarantee that it resides within the same filesystem and therefore permit hard-linking and cheap file moves. 
""" - # NOTE(sirp): staging area is created in SR to allow hard-linking staging_path = tempfile.mkdtemp(dir=sr_path) return staging_path @@ -314,7 +317,7 @@ def upload_image(session, args): finally: _cleanup_staging_area(staging_path) - return "" + return "" # Nothing useful to return on an upload def copy_kernel_vdi(session,args): From c33378fbbe0fd76e807530522715ba4175af18d8 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 21:54:42 +0000 Subject: [PATCH 11/39] Fixing typo --- nova/api/openstack/servers.py | 11 ++++++++--- nova/virt/xenapi/vm_utils.py | 14 +++++++++++++- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index cd0cea235b26..c15e499a06a3 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -129,8 +129,12 @@ class Controller(wsgi.Controller): Machine images are associated with Kernels and Ramdisk images via metadata stored in Glance as 'image_properties' """ - def lookup(param): - _image_id = image_id + # FIXME(sirp): Currently Nova requires us to specify the `null_kernel` + # identifier ('nokernel') to indicate a RAW (or VHD) image. It would + # be better if we could omit the kernel_id and ramdisk_id properties + # on the image + def lookup(image, param): + _image_id = image.id try: return image['properties'][param] except KeyError: @@ -138,7 +142,8 @@ class Controller(wsgi.Controller): _("%(param)s property not found for image %(_image_id)s") % locals()) - return lookup('kernel_id'), lookup('ramdisk_id') + image = self._image_service.show(req.environ['nova.context'], image_id) + return lookup(image, 'kernel_id'), lookup(image, 'ramdisk_id') def create(self, req): """ Creates a new server for a given user """ diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index ba3e5e423645..6fa03ee681a1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -331,12 +331,24 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_image', kwargs) vdi_uuid = session.wait_for_task(instance_id, task) + # TODO(sirp): set name-label on VDI scan_sr(session, instance_id, sr_ref) LOG.debug(_("xapi 'download_image' returned VDI UUID %(vdi_uuid)s") % locals()) return vdi_uuid @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, type): + """Fetch the image from Glance + + NOTE: + Unlike _fetch_image_glance_vhd, this method does not use the Glance + plugin; instead, it streams the disks through domU to the VDI + directly. + + """ + # FIXME(sirp): Since the Glance plugin seems to be required for the VHD disk, + # it may be worth using the plugin for both VHD and RAW and DISK + # restores sr_ref = safe_find_sr(session) client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) @@ -349,7 +361,7 @@ class VMHelper(HelperBase): # Make room for MBR. 
vdi_size += MBR_SIZE_BYTES - vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, + vdi = cls.create_vdi(session, sr_ref, _('Glance image %s') % image, vdi_size, False) with_vdi_attached_here(session, vdi, False, From 24d988263324c9136a1cd9aa5a3a3c4fdf229651 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 22:19:58 +0000 Subject: [PATCH 12/39] Set name-label on VDI --- nova/virt/xenapi/vm_utils.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 6fa03ee681a1..738372da02bc 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -331,8 +331,14 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_image', kwargs) vdi_uuid = session.wait_for_task(instance_id, task) - # TODO(sirp): set name-label on VDI + scan_sr(session, instance_id, sr_ref) + + # Set the name-label to ease debugging + vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) + name_label = get_name_label_for_image(image) + session.get_xenapi().VDI.set_name_label(vdi_ref, name_label) + LOG.debug(_("xapi 'download_image' returned VDI UUID %(vdi_uuid)s") % locals()) return vdi_uuid @@ -361,8 +367,8 @@ class VMHelper(HelperBase): # Make room for MBR. vdi_size += MBR_SIZE_BYTES - vdi = cls.create_vdi(session, sr_ref, _('Glance image %s') % image, - vdi_size, False) + name_label = get_name_label_for_image(image) + vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) with_vdi_attached_here(session, vdi, False, lambda dev: @@ -848,3 +854,8 @@ def _write_partition(virtual_size, dev): (dest, primary_first, primary_last)) LOG.debug(_('Writing partition table %s done.'), dest) + + +def get_name_label_for_image(image): + # TODO(sirp): This should eventually be the URI for the Glance image + return _('Glance image %s') % image From 28ba475d9c32a384570ce6eb0e2f9cfc3dc79a08 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 22:24:48 +0000 Subject: [PATCH 13/39] Pep8 fixes --- nova/virt/xenapi/vm_utils.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 738372da02bc..93009b408117 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -311,7 +311,8 @@ class VMHelper(HelperBase): access, user.secret, type) @classmethod - def _fetch_image_glance_vhd(cls, session, instance_id, image, access, type): + def _fetch_image_glance_vhd(cls, session, instance_id, image, access, + type): LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals()) @@ -339,11 +340,13 @@ class VMHelper(HelperBase): name_label = get_name_label_for_image(image) session.get_xenapi().VDI.set_name_label(vdi_ref, name_label) - LOG.debug(_("xapi 'download_image' returned VDI UUID %(vdi_uuid)s") % locals()) + LOG.debug(_("xapi 'download_image' returned VDI UUID %(vdi_uuid)s") + % locals()) return vdi_uuid @classmethod - def _fetch_image_glance_disk(cls, session, instance_id, image, access, type): + def _fetch_image_glance_disk(cls, session, instance_id, image, access, + type): """Fetch the image from Glance NOTE: @@ -352,9 +355,9 @@ class VMHelper(HelperBase): directly. 
""" - # FIXME(sirp): Since the Glance plugin seems to be required for the VHD disk, - # it may be worth using the plugin for both VHD and RAW and DISK - # restores + # FIXME(sirp): Since the Glance plugin seems to be required for the + # VHD disk, it may be worth using the plugin for both VHD and RAW and + # DISK restores sr_ref = safe_find_sr(session) client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) @@ -429,12 +432,13 @@ class VMHelper(HelperBase): properties = meta['properties'] disk_format = properties.get('disk_format', None) # TODO(sirp): When Glance treats disk_format as a first class - # attribute, we should start using that rather than an image-property + # attribute, we should start using that rather than an + # image-property if disk_format == 'vhd': # 2a. DISK_VHD log_disk_format('disk_vhd') return ImageType.DISK_VHD - + # 2b. DISK_RAW log_disk_format('disk_raw') return ImageType.DISK_RAW From c7cd7b755c86bd15e2b19f70a09f88f62361596c Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 22:31:51 +0000 Subject: [PATCH 14/39] More pep8 fixes --- .../xenapi/etc/xapi.d/plugins/glance | 51 +++++++++---------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 75bdda2c67c6..43c58dcff26e 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -39,20 +39,22 @@ configure_logging('glance') CHUNK_SIZE = 8192 KERNEL_DIR = '/boot/guest' -def _copy_kernel_vdi(dest,copy_args): - vdi_uuid=copy_args['vdi_uuid'] - vdi_size=copy_args['vdi_size'] - logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",dest,vdi_uuid) - filename=KERNEL_DIR + '/' + vdi_uuid + +def _copy_kernel_vdi(dest, copy_args): + vdi_uuid = copy_args['vdi_uuid'] + vdi_size = copy_args['vdi_size'] + logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s", + dest, vdi_uuid) + filename = KERNEL_DIR + '/' + vdi_uuid #read data from /dev/ and write into a file on /boot/guest - of=open(filename,'wb') - f=open(dest,'rb') + of = open(filename, 'wb') + f = open(dest, 'rb') #copy only vdi_size bytes - data=f.read(vdi_size) + data = f.read(vdi_size) of.write(data) f.close() of.close() - logging.debug("Done. Filename: %s",filename) + logging.debug("Done. Filename: %s", filename) return filename @@ -101,9 +103,9 @@ def fixup_vhds(sr_path, staging_path, uuid_stack): 3. Linking the two VHDs together 4. Pseudo-atomically moving the images into the SR. (It's not really - atomic because it takes place as two os.rename operations; however, the - chances of an SR.scan occuring between the two rename() invocations is so - small that we can safely ignore it) + atomic because it takes place as two os.rename operations; however, + the chances of an SR.scan occuring between the two rename() + invocations is so small that we can safely ignore it) """ def rename_with_uuid(orig_path): """Rename VHD using UUID so that it will be recognized by SR on a @@ -118,7 +120,6 @@ def fixup_vhds(sr_path, staging_path, uuid_stack): os.rename(orig_path, new_path) return new_path, uuid - def link_vhds(child_path, parent_path): """Use vhd-util to associate the snapshot VHD with its base_copy. 
@@ -132,7 +133,6 @@ def fixup_vhds(sr_path, staging_path, uuid_stack): modify_args, stderr=subprocess.PIPE) _assert_process_success(modify_proc, "vhd-util") - def move_into_sr(orig_path): """Move a file into the SR""" filename = os.path.basename(orig_path) @@ -140,7 +140,6 @@ def fixup_vhds(sr_path, staging_path, uuid_stack): os.rename(orig_path, new_path) return new_path - image_path = os.path.join(staging_path, 'image') orig_base_copy_path = os.path.join(image_path, 'image.vhd') @@ -157,7 +156,7 @@ def fixup_vhds(sr_path, staging_path, uuid_stack): # NOTE(sirp): this step is necessary so that an SR scan won't # delete the base_copy out from under us (since it would be # orphaned) - link_vhds(snap_path, base_copy_path) + link_vhds(snap_path, base_copy_path) move_into_sr(snap_path) move_into_sr(base_copy_path) @@ -227,7 +226,7 @@ def _make_staging_area(sr_path): The staging area is a place where we can temporarily store and manipulate VHDs. The use of the staging area is different for upload and download: - + Download ======== @@ -241,7 +240,7 @@ def _make_staging_area(sr_path): Upload ====== - + On upload, we want to rename the VHDs to reflect what they are, 'snap.vhd' in the case of the snapshot VHD, and 'image.vhd' in the case of the base_copy. The staging area provides a directory in which we can create @@ -264,7 +263,7 @@ def _cleanup_staging_area(staging_path): On upload, the staging area contains hard-links to the VHDs in the SR; it's safe to remove the staging-area because the SR will keep the link - count > 0 (so the VHDs in the SR will not be deleted). + count > 0 (so the VHDs in the SR will not be deleted). """ shutil.rmtree(staging_path) @@ -320,15 +319,15 @@ def upload_image(session, args): return "" # Nothing useful to return on an upload -def copy_kernel_vdi(session,args): +def copy_kernel_vdi(session, args): vdi = exists(args, 'vdi-ref') - size = exists(args,'image-size') + size = exists(args, 'image-size') #Use the uuid as a filename - vdi_uuid=session.xenapi.VDI.get_uuid(vdi) - copy_args={'vdi_uuid':vdi_uuid,'vdi_size':int(size)} - filename=with_vdi_in_dom0(session, vdi, False, - lambda dev: - _copy_kernel_vdi('/dev/%s' % dev,copy_args)) + vdi_uuid = session.xenapi.VDI.get_uuid(vdi) + copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)} + filename = with_vdi_in_dom0(session, vdi, False, + lambda dev: + _copy_kernel_vdi('/dev/%s' % dev, copy_args)) return filename From 21a3d77fee681d05c465c74e40177ae022bc24af Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 22:41:27 +0000 Subject: [PATCH 15/39] Fixing test by adding stub for get_image_meta --- nova/tests/glance/stubs.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index f182b857a598..4cd5c357f4e5 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -29,9 +29,10 @@ class FakeGlance(object): def __init__(self, host, port=None, use_ssl=False): pass - def get_image(self, image): - meta = { - 'size': 0, - } + def get_image_meta(self, image_id): + return {'size': 0, 'properties': {}} + + def get_image(self, image_id): + meta = self.get_image_meta(image_id) image_file = StringIO.StringIO('') return meta, image_file From af920572f42b07c3ea491015d30eb5001d1f735d Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 15 Feb 2011 23:36:23 +0000 Subject: [PATCH 16/39] Adding vhd hidden sanity check --- .../xenapi/etc/xapi.d/plugins/glance | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 
deletion(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 43c58dcff26e..ceb9a218517a 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -140,6 +140,25 @@ def fixup_vhds(sr_path, staging_path, uuid_stack): os.rename(orig_path, new_path) return new_path + def assert_vhd_not_hidden(path): + """ + This is a sanity check on the image; if a snap.vhd isn't + present, then the image.vhd better not be marked 'hidden' or it will + be deleted when moved into the SR. + """ + vhd_query_args = shlex.split( + "vhd-util query -n %(path)s -f" % locals()) + vhd_query_proc = subprocess.Popen( + vhd_query_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = _assert_process_success(vhd_query_proc, "vhd-util") + for line in out.splitlines(): + if line.startswith('hidden'): + value = line.split(':')[1].strip() + if value == "1": + raise Exception( + "VHD %(path)s is marked as hidden without child" % + locals()) + image_path = os.path.join(staging_path, 'image') orig_base_copy_path = os.path.join(image_path, 'image.vhd') @@ -158,6 +177,8 @@ def fixup_vhds(sr_path, staging_path, uuid_stack): # orphaned) link_vhds(snap_path, base_copy_path) move_into_sr(snap_path) + else: + assert_vhd_not_hidden(base_copy_path) move_into_sr(base_copy_path) return vdi_uuid @@ -276,7 +297,7 @@ def _assert_process_success(proc, cmd): if ret != 0: msg = "%(cmd)s returned non-zero exit code (%i): '%s'" % (ret, err) raise Exception(msg) - + return out, err def download_image(session, args): """Download an image from Glance, unbundle it, and then deposit the VHDs From 163e81ac2bc2f9945273b0659ceb473767e5b19f Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 16 Feb 2011 11:53:50 -0500 Subject: [PATCH 17/39] This implements the blueprint 'Openstack API support for hostId': https://blueprints.launchpad.net/nova/+spec/openstack-api-hostid Now instances will have a unique hostId which for now is just a hash of the host. If the instance does not have a host yet, the hostId will be ''. --- nova/api/openstack/servers.py | 7 +++- nova/tests/api/openstack/test_servers.py | 41 ++++++++++++++++++++++-- 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 17c5519a1a91..58eda53b9bb1 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import hashlib import json import traceback @@ -65,7 +66,11 @@ def _translate_detail_keys(inst): inst_dict['status'] = power_mapping[inst_dict['status']] inst_dict['addresses'] = dict(public=[], private=[]) inst_dict['metadata'] = {} - inst_dict['hostId'] = '' + + if inst['host']: + inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest() + else: + inst_dict['hostId'] = '' return dict(server=inst_dict) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 724f14f19ccd..e615141ffea9 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -43,6 +43,10 @@ def return_servers(context, user_id=1): return [stub_instance(i, user_id) for i in xrange(5)] +def return_servers_with_host(context, user_id=1): + return [stub_instance(i, user_id, 1) for i in xrange(5)] + + def return_security_group(context, instance_id, security_group_id): pass @@ -55,9 +59,13 @@ def instance_address(context, instance_id): return None -def stub_instance(id, user_id=1): - return Instance(id=id, state=0, image_id=10, user_id=user_id, - display_name='server%s' % id) +def stub_instance(id, user_id=1, with_hosts=False): + if with_hosts: + return Instance(id=id, state=0, image_id=10, user_id=user_id, + display_name='server%s' % id, host='host%s' % (id % 2)) + else: + return Instance(id=id, state=0, image_id=10, user_id=user_id, + display_name='server%s' % id) def fake_compute_api(cls, req, id): @@ -229,6 +237,33 @@ class ServersTest(unittest.TestCase): i = 0 for s in res_dict['servers']: self.assertEqual(s['id'], i) + self.assertEqual(s['hostId'], '') + self.assertEqual(s['name'], 'server%d' % i) + self.assertEqual(s['imageId'], 10) + i += 1 + + def test_get_all_server_details_with_host(self): + ''' + We want to make sure that if two instances are on the same host, then + they return the same hostId. If two instances are on different hosts, + they should return different hostId's. In this test, we get 5 instances + back where 2 are on one host and 3 are on another. + ''' + self.stubs.Set(nova.db.api, 'instance_get_all_by_user', + return_servers_with_host) + req = webob.Request.blank('/v1.0/servers/detail') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + server_list = res_dict['servers'] + host_ids = [server_list[0]['hostId'], server_list[1]['hostId']] + self.assertTrue(host_ids[0]) + self.assertTrue(host_ids[1]) + self.assertTrue(host_ids[0] != host_ids[1]) + i = 0 + for s in res_dict['servers']: + self.assertEqual(s['id'], i) + self.assertEqual(s['hostId'], host_ids[i % 2]) self.assertEqual(s['name'], 'server%d' % i) self.assertEqual(s['imageId'], 10) i += 1 From 585ba4d6cf25eabf83b1b33a6de794ce671c0c98 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 16 Feb 2011 18:43:55 +0000 Subject: [PATCH 18/39] Putting glance plugin under pep8 control --- nova/compute/api.py | 3 --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 1 + run_tests.sh | 6 +++++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 8a16baf450b5..ea81c7ff045d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -104,11 +104,8 @@ class API(base.Base): image = self.image_service.show(context, image_id) if kernel_id is None: kernel_id = image.get('kernel_id', None) - # FIXME(sirp): which one to use? 
- #kernel_id = image.get('kernelId', None) if ramdisk_id is None: ramdisk_id = image.get('ramdisk_id', None) - #ramdisk_id = image.get('ramdiskId', None) # FIXME(sirp): is there a way we can remove null_kernel? # No kernel and ramdisk for raw images diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 47e094f02e50..f737c6c413e0 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -299,6 +299,7 @@ def _assert_process_success(proc, cmd): raise Exception(msg) return out, err + def download_image(session, args): """Download an image from Glance, unbundle it, and then deposit the VHDs into the storage repository diff --git a/run_tests.sh b/run_tests.sh index 4e21fe945983..6c7dd5f46ad7 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -73,7 +73,11 @@ fi if [ -z "$noseargs" ]; then - run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1 + PEP8_EXCLUDE=vcsversion.py + PEP8_OPTIONS="--exclude=$PEP8_EXCLUDE --repeat --show-pep8 --show-source" + # TODO(sirp): Put tests/ run_tests.py and all plugins/ under pep8 control + PEP8_INCLUDE="bin/* nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance" + run_tests && pep8 $PEP8_OPTIONS $PEP8_INCLUDE || exit 1 else run_tests fi From a5ec2be709d28267075ddc9616c5c29b62622af5 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 16 Feb 2011 23:07:43 +0000 Subject: [PATCH 19/39] Adding basic test --- nova/tests/glance/stubs.py | 22 ++++++++++++++++++---- nova/tests/test_xenapi.py | 4 ++++ nova/tests/xenapi/stubs.py | 6 ++++++ 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 4cd5c357f4e5..fc120e5235cd 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -26,13 +26,27 @@ def stubout_glance_client(stubs, cls): class FakeGlance(object): + IMAGE_FIXTURES = { + 1: {'image_meta': {'name': 'fakemachine', 'size': 0, + 'properties': {}}, + 'image_data': StringIO.StringIO('') }, + 2: {'image_meta': {'name': 'fakekernel', 'size': 0, + 'properties': {}}, + 'image_data': StringIO.StringIO('') }, + 3: {'image_meta': {'name': 'fakekernel', 'size': 0, + 'properties': {}}, + 'image_data': StringIO.StringIO('') }, + 4: {'image_meta': {'name': 'fakekernel', 'size': 0, + 'properties': {'disk_format': 'vhd'}}, + 'image_data': StringIO.StringIO('') }, + } + def __init__(self, host, port=None, use_ssl=False): pass def get_image_meta(self, image_id): - return {'size': 0, 'properties': {}} + return self.IMAGE_FIXTURES[image_id]['image_meta'] def get_image(self, image_id): - meta = self.get_image_meta(image_id) - image_file = StringIO.StringIO('') - return meta, image_file + image = self.IMAGE_FIXTURES[image_id] + return image['image_meta'], image['image_data'] diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index d5660c5d1f92..75387e7f5b83 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -279,6 +279,10 @@ class XenAPIVMTestCase(test.TestCase): FLAGS.xenapi_image_service = 'glance' self._test_spawn(1, None, None) + def test_spawn_vhd_glance(self): + FLAGS.xenapi_image_service = 'glance' + self._test_spawn(4, None, None) + def test_spawn_glance(self): FLAGS.xenapi_image_service = 'glance' self._test_spawn(1, 2, 3) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 624995ada1a1..2e3b62a773e9 100644 --- 
a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -171,6 +171,12 @@ class FakeSessionForVMTests(fake.SessionBase): def VM_destroy(self, session_ref, vm_ref): fake.destroy_vm(vm_ref) + def SR_scan(self, session_ref, sr_ref): + pass + + def VDI_set_name_label(self, session_ref, vdi_ref, name_label): + pass + class FakeSessionForVolumeTests(fake.SessionBase): """ Stubs out a XenAPISession for Volume tests """ From c56b1814cfae7a9c814b2d37388aff5e772771b6 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 16 Feb 2011 23:39:12 +0000 Subject: [PATCH 20/39] Pep8 fixes --- nova/tests/glance/stubs.py | 8 ++++---- nova/virt/xenapi/vm_utils.py | 8 +++++--- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 8 ++++---- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index fc120e5235cd..c583579625d2 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -29,16 +29,16 @@ class FakeGlance(object): IMAGE_FIXTURES = { 1: {'image_meta': {'name': 'fakemachine', 'size': 0, 'properties': {}}, - 'image_data': StringIO.StringIO('') }, + 'image_data': StringIO.StringIO('')}, 2: {'image_meta': {'name': 'fakekernel', 'size': 0, 'properties': {}}, - 'image_data': StringIO.StringIO('') }, + 'image_data': StringIO.StringIO('')}, 3: {'image_meta': {'name': 'fakekernel', 'size': 0, 'properties': {}}, - 'image_data': StringIO.StringIO('') }, + 'image_data': StringIO.StringIO('')}, 4: {'image_meta': {'name': 'fakekernel', 'size': 0, 'properties': {'disk_format': 'vhd'}}, - 'image_data': StringIO.StringIO('') }, + 'image_data': StringIO.StringIO('')}, } def __init__(self, host, port=None, use_ssl=False): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index d77db2ddb551..33945aca3782 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -289,6 +289,8 @@ class VMHelper(HelperBase): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. 
""" + # NOTE(sirp): Currently we only support uploading images as VHD, there + # is no RAW equivalent (yet) logging.debug(_("Asking xapi to upload %(vdi_uuids)s as" " ID %(image_id)s") % locals()) @@ -299,7 +301,7 @@ class VMHelper(HelperBase): 'sr_path': get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} - task = session.async_call_plugin('glance', 'upload_image', kwargs) + task = session.async_call_plugin('glance', 'upload_vhd', kwargs) session.wait_for_task(instance_id, task) @classmethod @@ -340,7 +342,7 @@ class VMHelper(HelperBase): 'sr_path': get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} - task = session.async_call_plugin('glance', 'download_image', kwargs) + task = session.async_call_plugin('glance', 'download_vhd', kwargs) vdi_uuid = session.wait_for_task(instance_id, task) scan_sr(session, instance_id, sr_ref) @@ -350,7 +352,7 @@ class VMHelper(HelperBase): name_label = get_name_label_for_image(image) session.get_xenapi().VDI.set_name_label(vdi_ref, name_label) - LOG.debug(_("xapi 'download_image' returned VDI UUID %(vdi_uuid)s") + LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s") % locals()) return vdi_uuid diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index f737c6c413e0..c3f793dddfad 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -300,7 +300,7 @@ def _assert_process_success(proc, cmd): return out, err -def download_image(session, args): +def download_vhd(session, args): """Download an image from Glance, unbundle it, and then deposit the VHDs into the storage repository """ @@ -321,7 +321,7 @@ def download_image(session, args): _cleanup_staging_area(staging_path) -def upload_image(session, args): +def upload_vhd(session, args): """Bundle the VHDs comprising an image and then stream them into Glance. """ params = pickle.loads(exists(args, 'params')) @@ -365,7 +365,7 @@ def remove_kernel_ramdisk(session, args): if __name__ == '__main__': - XenAPIPlugin.dispatch({'upload_image': upload_image, - 'download_image': download_image, + XenAPIPlugin.dispatch({'upload_vhd': upload_vhd, + 'download_vhd': download_vhd, 'copy_kernel_vdi': copy_kernel_vdi, 'remove_kernel_ramdisk': remove_kernel_ramdisk}) From ce847afcc1e24463d7aa522f227a08193c72fcc0 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 16 Feb 2011 19:12:44 -0500 Subject: [PATCH 21/39] Moved definition of return_servers_with_host stub to inside the test_get_all_server_details_with_host test. 
--- nova/api/openstack/servers.py | 3 +-- nova/tests/api/openstack/test_servers.py | 29 ++++++++++++------------ 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 58eda53b9bb1..323e6fda684d 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -67,10 +67,9 @@ def _translate_detail_keys(inst): inst_dict['addresses'] = dict(public=[], private=[]) inst_dict['metadata'] = {} + inst_dict['hostId'] = '' if inst['host']: inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest() - else: - inst_dict['hostId'] = '' return dict(server=inst_dict) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index e615141ffea9..6c91b3f5b172 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -43,10 +43,6 @@ def return_servers(context, user_id=1): return [stub_instance(i, user_id) for i in xrange(5)] -def return_servers_with_host(context, user_id=1): - return [stub_instance(i, user_id, 1) for i in xrange(5)] - - def return_security_group(context, instance_id, security_group_id): pass @@ -60,12 +56,8 @@ def instance_address(context, instance_id): def stub_instance(id, user_id=1, with_hosts=False): - if with_hosts: - return Instance(id=id, state=0, image_id=10, user_id=user_id, - display_name='server%s' % id, host='host%s' % (id % 2)) - else: - return Instance(id=id, state=0, image_id=10, user_id=user_id, - display_name='server%s' % id) + return Instance(id=id, state=0, image_id=10, user_id=user_id, + display_name='server%s' % id) def fake_compute_api(cls, req, id): @@ -246,20 +238,27 @@ class ServersTest(unittest.TestCase): ''' We want to make sure that if two instances are on the same host, then they return the same hostId. If two instances are on different hosts, - they should return different hostId's. In this test, we get 5 instances - back where 2 are on one host and 3 are on another. + they should return different hostId's. In this test, there are 5 + instances - 2 on one host and 3 on another. ''' + + def return_servers_with_host(context, user_id=1): + return [ + Instance(id=i, state=0, image_id=10, user_id=user_id, + display_name='server%s' % i, host='host%s' % (i % 2)) + for i in xrange(5)] self.stubs.Set(nova.db.api, 'instance_get_all_by_user', - return_servers_with_host) + return_servers_with_host) + req = webob.Request.blank('/v1.0/servers/detail') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) server_list = res_dict['servers'] host_ids = [server_list[0]['hostId'], server_list[1]['hostId']] - self.assertTrue(host_ids[0]) - self.assertTrue(host_ids[1]) + self.assertTrue(host_ids[0] and host_ids[1]) self.assertTrue(host_ids[0] != host_ids[1]) + i = 0 for s in res_dict['servers']: self.assertEqual(s['id'], i) From 56ad2a63f1dcf4a900fa4464671015dbaac05fdc Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 16 Feb 2011 19:37:28 -0500 Subject: [PATCH 22/39] Minor change. Adding a helper function stub_instance() inside the test test_get_all_server_details_with_host for readability. 
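
The refactored test keeps the same data shape as before: the local stub assigns hosts
round-robin, so the five stubbed instances land on exactly two hosts and the detail
listing must therefore contain exactly two distinct hostId values. A quick
illustration of that expectation (not part of the change itself):

    # host='host%s' % (id % 2) spreads five instances across two hosts
    hosts = ['host%s' % (i % 2) for i in xrange(5)]
    # -> ['host0', 'host1', 'host0', 'host1', 'host0']
    assert len(set(hosts)) == 2
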
--- nova/tests/api/openstack/test_servers.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 6c91b3f5b172..630e1e5eb6cd 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -55,7 +55,7 @@ def instance_address(context, instance_id): return None -def stub_instance(id, user_id=1, with_hosts=False): +def stub_instance(id, user_id=1): return Instance(id=id, state=0, image_id=10, user_id=user_id, display_name='server%s' % id) @@ -242,11 +242,13 @@ class ServersTest(unittest.TestCase): instances - 2 on one host and 3 on another. ''' + def stub_instance(id, user_id=1): + return Instance(id=id, state=0, image_id=10, user_id=user_id, + display_name='server%s' % id, host='host%s' % (id % 2)) + def return_servers_with_host(context, user_id=1): - return [ - Instance(id=i, state=0, image_id=10, user_id=user_id, - display_name='server%s' % i, host='host%s' % (i % 2)) - for i in xrange(5)] + return [stub_instance(i) for i in xrange(5)] + self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers_with_host) From 04e29f6dc4b13b6fd0cbe5013cf241a727eb56ac Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 17 Feb 2011 01:24:31 +0000 Subject: [PATCH 23/39] Use glance image type to determine disk type --- nova/tests/glance/stubs.py | 12 ++-- nova/virt/xenapi/vm_utils.py | 66 ++++++++++--------- nova/virt/xenapi/vmops.py | 9 ++- .../xenapi/etc/xapi.d/plugins/glance | 3 +- 4 files changed, 48 insertions(+), 42 deletions(-) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index c583579625d2..1a5fb7ffb283 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -28,16 +28,16 @@ def stubout_glance_client(stubs, cls): class FakeGlance(object): IMAGE_FIXTURES = { 1: {'image_meta': {'name': 'fakemachine', 'size': 0, - 'properties': {}}, + 'type': 'machine'}, 'image_data': StringIO.StringIO('')}, 2: {'image_meta': {'name': 'fakekernel', 'size': 0, - 'properties': {}}, + 'type': 'kernel'}, 'image_data': StringIO.StringIO('')}, - 3: {'image_meta': {'name': 'fakekernel', 'size': 0, - 'properties': {}}, + 3: {'image_meta': {'name': 'fakeramdisk', 'size': 0, + 'type': 'ramdisk'}, 'image_data': StringIO.StringIO('')}, - 4: {'image_meta': {'name': 'fakekernel', 'size': 0, - 'properties': {'disk_format': 'vhd'}}, + 4: {'image_meta': {'name': 'fakevhd', 'size': 0, + 'type': 'vhd'}, 'image_data': StringIO.StringIO('')}, } diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 33945aca3782..278e52211918 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -413,47 +413,51 @@ class VMHelper(HelperBase): within an image. To figure out which type we're dealing with, we use the following rules: - 1. If the instance is specifying a kernel explicitly, we must be using - a 'disk' image (kernel outside of the image) + 1. If we're using Glance, we can use the image_type field to + determine the image_type - 2. If the kernel isn't specified, then we have two different - scenarios: - - a) If the image is in Glance, then we can use the 'disk_format' - property to determine if the image is really a VHD-style image - or if it's a RAW image - - b) If the image is not in Glance, then it must be a RAW image - (since we don't have a way of identifying VHD images...yet) + 2. If we're not using Glance, then we need to deduce this based on + whether a kernel_id is specified. 
""" - def log_disk_format(disk_format): - disk_format = disk_format.upper() + def log_disk_format(image_type): + pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK', + ImageType.DISK: 'DISK', + ImageType.DISK_RAW: 'DISK_RAW', + ImageType.DISK_VHD: 'DISK_VHD'} + disk_format = pretty_format[image_type] image_id = instance.image_id instance_id = instance.id LOG.debug(_("Detected %(disk_format)s format for image " "%(image_id)s, instance %(instance_id)s") % locals()) - if instance.kernel_id: - # 1. DISK - log_disk_format('disk') - return ImageType.DISK - elif FLAGS.xenapi_image_service == 'glance': - # if using glance, then we could be VHD format + def determine_from_glance(): + glance_type2nova_type = {'machine': ImageType.DISK, + 'raw': ImageType.DISK_RAW, + 'vhd': ImageType.DISK_VHD, + 'kernel': ImageType.KERNEL_RAMDISK, + 'ramdisk': ImageType.KERNEL_RAMDISK} client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) meta = client.get_image_meta(instance.image_id) - properties = meta['properties'] - disk_format = properties.get('disk_format', None) - # TODO(sirp): When Glance treats disk_format as a first class - # attribute, we should start using that rather than an - # image-property - if disk_format == 'vhd': - # 2a. DISK_VHD - log_disk_format('disk_vhd') - return ImageType.DISK_VHD + type_ = meta['type'] + try: + return glance_type2nova_type[type_] + except KeyError: + raise exception.NotFound( + _("Unrecognized image type '%(type_)s'") % locals()) - # 2b. DISK_RAW - log_disk_format('disk_raw') - return ImageType.DISK_RAW + def determine_from_instance(): + if instance.kernel_id: + return ImageType.DISK + else: + return ImageType.DISK_RAW + + if FLAGS.xenapi_image_service == 'glance': + image_type = determine_from_glance() + else: + image_type = determine_from_instance() + + log_disk_format(image_type) + return image_type @classmethod def _fetch_image_glance(cls, session, instance_id, image, access, type): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 943d74da3e21..961d589d5095 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -224,7 +224,8 @@ class VMOps(object): VMHelper.upload_image( self._session, instance.id, template_vdi_uuids, image_id) finally: - self._destroy(instance, template_vm_ref, shutdown=False) + self._destroy(instance, template_vm_ref, shutdown=False, + destroy_kernel_ramdisk=False) logging.debug(_("Finished snapshot and upload for VM %s"), instance) @@ -368,7 +369,8 @@ class VMOps(object): vm = VMHelper.lookup(self._session, instance.name) return self._destroy(instance, vm, shutdown=True) - def _destroy(self, instance, vm, shutdown=True): + def _destroy(self, instance, vm, shutdown=True, + destroy_kernel_ramdisk=True): """ Destroys VM instance by performing: @@ -385,7 +387,8 @@ class VMOps(object): self._shutdown(instance, vm) self._destroy_vdis(instance, vm) - self._destroy_kernel_ramdisk(instance, vm) + if destroy_kernel_ramdisk: + self._destroy_kernel_ramdisk(instance, vm) self._destroy_vm(instance, vm) def _wait_with_callback(self, instance_id, task, callback): diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index c3f793dddfad..a91f8a7c1cd1 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -211,8 +211,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port): headers = { 'x-image-meta-store': 'file', 'x-image-meta-is_public': 'True', - 
'x-image-meta-type': 'raw', - 'x-image-meta-property-disk-format': 'vhd', + 'x-image-meta-type': 'vhd', 'x-image-meta-property-kernel-id': 'nokernel', 'x-image-meta-property-ramdisk-id': 'noramdisk', 'x-image-meta-property-container-format': 'tarball', From 923a4938b73b84aa8a31f08a7c7b983cc82959fe Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 17 Feb 2011 07:29:50 +0000 Subject: [PATCH 24/39] Adding tests --- nova/tests/glance/stubs.py | 25 +++++++++---- nova/tests/test_xenapi.py | 70 ++++++++++++++++++++++++++++++++++-- nova/tests/xenapi/stubs.py | 10 ++++++ nova/virt/xenapi/vm_utils.py | 4 +++ 4 files changed, 100 insertions(+), 9 deletions(-) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 1a5fb7ffb283..3ff8d7ce5698 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -26,20 +26,33 @@ def stubout_glance_client(stubs, cls): class FakeGlance(object): + IMAGE_MACHINE = 1 + IMAGE_KERNEL = 2 + IMAGE_RAMDISK = 3 + IMAGE_RAW = 4 + IMAGE_VHD = 5 + IMAGE_FIXTURES = { - 1: {'image_meta': {'name': 'fakemachine', 'size': 0, + IMAGE_MACHINE: { + 'image_meta': {'name': 'fakemachine', 'size': 0, 'type': 'machine'}, 'image_data': StringIO.StringIO('')}, - 2: {'image_meta': {'name': 'fakekernel', 'size': 0, + IMAGE_KERNEL: { + 'image_meta': {'name': 'fakekernel', 'size': 0, 'type': 'kernel'}, 'image_data': StringIO.StringIO('')}, - 3: {'image_meta': {'name': 'fakeramdisk', 'size': 0, + IMAGE_RAMDISK: { + 'image_meta': {'name': 'fakeramdisk', 'size': 0, 'type': 'ramdisk'}, 'image_data': StringIO.StringIO('')}, - 4: {'image_meta': {'name': 'fakevhd', 'size': 0, - 'type': 'vhd'}, + IMAGE_RAW: { + 'image_meta': {'name': 'fakeraw', 'size': 0, + 'type': 'raw'}, 'image_data': StringIO.StringIO('')}, - } + IMAGE_VHD: { + 'image_meta': {'name': 'fakevhd', 'size': 0, + 'type': 'vhd'}, + 'image_data': StringIO.StringIO('')}} def __init__(self, host, port=None, use_ssl=False): pass diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 75387e7f5b83..f8a3d72c403f 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -31,6 +31,7 @@ from nova.compute import power_state from nova.virt import xenapi_conn from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import volume_utils +from nova.virt.xenapi import vm_utils from nova.virt.xenapi.vmops import SimpleDH from nova.tests.db import fakes as db_fakes from nova.tests.xenapi import stubs @@ -162,6 +163,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) stubs.stubout_get_this_vm_uuid(self.stubs) stubs.stubout_stream_disk(self.stubs) + stubs.stubout_lookup_image(self.stubs) glance_stubs.stubout_glance_client(self.stubs, glance_stubs.FakeGlance) self.conn = xenapi_conn.get_connection(False) @@ -277,15 +279,17 @@ class XenAPIVMTestCase(test.TestCase): def test_spawn_raw_glance(self): FLAGS.xenapi_image_service = 'glance' - self._test_spawn(1, None, None) + self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None) def test_spawn_vhd_glance(self): FLAGS.xenapi_image_service = 'glance' - self._test_spawn(4, None, None) + self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None) def test_spawn_glance(self): FLAGS.xenapi_image_service = 'glance' - self._test_spawn(1, 2, 3) + self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, + glance_stubs.FakeGlance.IMAGE_KERNEL, + glance_stubs.FakeGlance.IMAGE_RAMDISK) def tearDown(self): super(XenAPIVMTestCase, self).tearDown() @@ -334,3 +338,63 @@ class 
XenAPIDiffieHellmanTestCase(test.TestCase): def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() + + +class XenAPIDetermineDiskImageTestCase(test.TestCase): + """ + Unit tests for code that detects the ImageType + """ + def setUp(self): + super(XenAPIDetermineDiskImageTestCase, self).setUp() + glance_stubs.stubout_glance_client(self.stubs, + glance_stubs.FakeGlance) + + class FakeInstance(object): + pass + + self.fake_instance = FakeInstance() + self.fake_instance.id = 42 + + def assert_disk_type(self, disk_type): + dt = vm_utils.VMHelper.determine_disk_image_type( + self.fake_instance) + self.assertEqual(disk_type, dt) + + def test_instance_disk(self): + """ + If a kernel is specified then the image type is DISK (aka machine) + """ + FLAGS.xenapi_image_service = 'objectstore' + self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE + self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL + self.assert_disk_type(vm_utils.ImageType.DISK) + + def test_instance_disk_raw(self): + """ + If the kernel isn't specified, and we're not using Glance, then + DISK_RAW is assumed. + """ + FLAGS.xenapi_image_service = 'objectstore' + self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.kernel_id = None + self.assert_disk_type(vm_utils.ImageType.DISK_RAW) + + def test_glance_disk_raw(self): + """ + If we're using Glance, then defer to the image_type field, which in + this case will be 'raw'. + """ + FLAGS.xenapi_image_service = 'glance' + self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.kernel_id = None + self.assert_disk_type(vm_utils.ImageType.DISK_RAW) + + def test_glance_disk_vhd(self): + """ + If we're using Glance, then defer to the image_type field, which in + this case will be 'vhd'. 
+ """ + FLAGS.xenapi_image_service = 'glance' + self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD + self.fake_instance.kernel_id = None + self.assert_disk_type(vm_utils.ImageType.DISK_VHD) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 2e3b62a773e9..1e6758a337ec 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -130,6 +130,16 @@ def stubout_stream_disk(stubs): stubs.Set(vm_utils, '_stream_disk', f) +def stubout_lookup_image(stubs): + @classmethod + def fake_lookup_image(cls, session, instance_id, vdi_ref): + # NOTE(sirp): pretending each image is paravirtualized for now + is_pv = True + return is_pv + + stubs.Set(vm_utils.VMHelper, 'lookup_image', fake_lookup_image) + + class FakeSessionForVMTests(fake.SessionBase): """ Stubs out a XenAPISession for VM tests """ def __init__(self, uri): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 278e52211918..9027d58c4ddd 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -490,6 +490,9 @@ class VMHelper(HelperBase): @classmethod def lookup_image(cls, session, instance_id, vdi_ref): + """ + Determine if VDI is using a PV kernel + """ if FLAGS.xenapi_image_service == 'glance': return cls._lookup_image_glance(session, vdi_ref) else: @@ -517,6 +520,7 @@ class VMHelper(HelperBase): def is_vdi_pv(dev): LOG.debug(_("Running pygrub against %s"), dev) + # TODO(sirp): use subprocess here output = os.popen('pygrub -qn /dev/%s' % dev) for line in output.readlines(): #try to find kernel string From 8dceaccb81e95b55fac2156df4f04ef0a7469112 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 17 Feb 2011 07:58:42 +0000 Subject: [PATCH 25/39] Typo fixes --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 4 ++-- run_tests.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index a91f8a7c1cd1..3b5cedda7562 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -85,7 +85,7 @@ def _download_tarball(sr_path, staging_path, image_id, glance_host, conn.close() -def fixup_vhds(sr_path, staging_path, uuid_stack): +def _fixup_vhds(sr_path, staging_path, uuid_stack): """Fixup the downloaded VHDs before we move them into the SR. We cannot extract VHDs directly into the SR since they don't yet have @@ -314,7 +314,7 @@ def download_vhd(session, args): try: _download_tarball(sr_path, staging_path, image_id, glance_host, glance_port) - vdi_uuid = fixup_vhds(sr_path, staging_path, uuid_stack) + vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack) return vdi_uuid finally: _cleanup_staging_area(staging_path) diff --git a/run_tests.sh b/run_tests.sh index 6c7dd5f46ad7..7ac5ae07124f 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -75,7 +75,7 @@ if [ -z "$noseargs" ]; then PEP8_EXCLUDE=vcsversion.py PEP8_OPTIONS="--exclude=$PEP8_EXCLUDE --repeat --show-pep8 --show-source" - # TODO(sirp): Put tests/ run_tests.py and all plugins/ under pep8 control + # TODO(sirp): Put run_tests.py and all plugins/ under pep8 control PEP8_INCLUDE="bin/* nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance" run_tests && pep8 $PEP8_OPTIONS $PEP8_INCLUDE || exit 1 else From 841b02230866cc163c26a264e86bba94c4b0335d Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 17 Feb 2011 13:15:28 -0500 Subject: [PATCH 26/39] Adding myself to Authors and .mailmap files. 
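
The .mailmap entry simply maps any alternate commit addresses back to the canonical
name used in Authors, so attribution tools such as git shortlog resolve them to a
single identity. A .mailmap line generally takes the form below (the addresses here
are placeholders, not the ones added by this patch):

    # format: Proper Name <canonical address> <alternate commit address>
    Proper Name <canonical@example.com> <alternate@example.com>
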
--- .mailmap | 1 + Authors | 1 + 2 files changed, 2 insertions(+) diff --git a/.mailmap b/.mailmap index c6f6c9a8b628..95bf1a1bc18a 100644 --- a/.mailmap +++ b/.mailmap @@ -34,3 +34,4 @@ + diff --git a/Authors b/Authors index b359fec221c6..168a3cf9abba 100644 --- a/Authors +++ b/Authors @@ -42,6 +42,7 @@ Monty Taylor MORITA Kazutaka Muneyuki Noguchi Nachi Ueno +Naveed Massjouni Paul Voccio Ricardo Carrillo Cruz Rick Clark From 94f5a5748158e61bde43327970dd8e513ca36575 Mon Sep 17 00:00:00 2001 From: Nirmal Ranganathan Date: Thu, 17 Feb 2011 17:13:59 -0600 Subject: [PATCH 27/39] Always compare incoming flavor_id as an int --- nova/compute/instance_types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 309313fd00f6..7a2a5baa3fdb 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -45,6 +45,6 @@ def get_by_type(instance_type): def get_by_flavor_id(flavor_id): for instance_type, details in INSTANCE_TYPES.iteritems(): - if details['flavorid'] == flavor_id: + if details['flavorid'] == int(flavor_id): return instance_type return FLAGS.default_instance_type From 4673afddcb5a1069f75fb3493e43498ed1de11f9 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Fri, 18 Feb 2011 02:23:30 -0500 Subject: [PATCH 28/39] Incorporating minor cleanups suggested by Rick Harris: * Use assertNotEqual instead of assertTrue * Use enumerate function instead of maintaining a counter --- nova/tests/api/openstack/test_servers.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 630e1e5eb6cd..4ed13022bce5 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -259,15 +259,13 @@ class ServersTest(unittest.TestCase): server_list = res_dict['servers'] host_ids = [server_list[0]['hostId'], server_list[1]['hostId']] self.assertTrue(host_ids[0] and host_ids[1]) - self.assertTrue(host_ids[0] != host_ids[1]) + self.assertNotEqual(host_ids[0], host_ids[1]) - i = 0 - for s in res_dict['servers']: + for i, s in enumerate(res_dict['servers']): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], host_ids[i % 2]) self.assertEqual(s['name'], 'server%d' % i) self.assertEqual(s['imageId'], 10) - i += 1 def test_server_pause(self): FLAGS.allow_admin_api = True From cd533e160e9c98a0c14b4e0bc32a6e94c7ab8657 Mon Sep 17 00:00:00 2001 From: Nirmal Ranganathan Date: Fri, 18 Feb 2011 11:44:38 -0600 Subject: [PATCH 29/39] Added Author and tests --- Authors | 1 + nova/tests/test_compute.py | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/Authors b/Authors index 494e614a0294..1d2316cf5ac8 100644 --- a/Authors +++ b/Authors @@ -46,6 +46,7 @@ MORITA Kazutaka Muneyuki Noguchi Nachi Ueno Naveed Massjouni +Nirmal Ranganathan Paul Voccio Ricardo Carrillo Cruz Rick Clark diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b049ac9439a1..e338a7cfae31 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -30,6 +30,7 @@ from nova import log as logging from nova import test from nova import utils from nova.auth import manager +from nova.compute import instance_types LOG = logging.getLogger('nova.tests.compute') @@ -266,3 +267,11 @@ class ComputeTestCase(test.TestCase): self.assertEqual(ret_val, None) self.compute.terminate_instance(self.context, instance_id) + + def test_get_by_flavor_id(self): + type = 
instance_types.get_by_flavor_id(1) + self.assertEqual(type, 'm1.tiny') + + type = instance_types.get_by_flavor_id("1") + self.assertEqual(type, 'm1.tiny') + From 8916442e7f2920938a317777de71f75faf463005 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 18 Feb 2011 21:42:04 +0000 Subject: [PATCH 30/39] Typo fix --- nova/virt/xenapi/vm_utils.py | 2 ++ nova/virt/xenapi/vmops.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9027d58c4ddd..88a205d2f55e 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -451,6 +451,8 @@ class VMHelper(HelperBase): else: return ImageType.DISK_RAW + # FIXME(sirp): can we unify the ImageService and xenapi_image_service + # abstractions? if FLAGS.xenapi_image_service == 'glance': image_type = determine_from_glance() else: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 8d1c79c0b630..09abd5861da6 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -394,7 +394,7 @@ class VMOps(object): """ Three situations can occur: - 1. We have netiher a ramdisk or a kernel, in which case we are a + 1. We have neither a ramdisk nor a kernel, in which case we are a RAW image and can omit this step 2. We have one or the other, in which case, we should flag as an From eefc8938d8a8010052affab9a5c0d010778d9780 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 18 Feb 2011 22:25:19 +0000 Subject: [PATCH 31/39] Changing type -> image_type --- nova/virt/xenapi/vm_utils.py | 44 ++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 88a205d2f55e..2114bfa42517 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -305,9 +305,10 @@ class VMHelper(HelperBase): session.wait_for_task(instance_id, task) @classmethod - def fetch_image(cls, session, instance_id, image, user, project, type): + def fetch_image(cls, session, instance_id, image, user, project, + image_type): """ - type is interpreted as an ImageType instance + image_type is interpreted as an ImageType instance Related flags: xenapi_image_service = ['glance', 'objectstore'] glance_address = 'address for glance services' @@ -317,14 +318,15 @@ class VMHelper(HelperBase): if FLAGS.xenapi_image_service == 'glance': return cls._fetch_image_glance(session, instance_id, image, - access, type) + access, image_type) else: return cls._fetch_image_objectstore(session, instance_id, image, - access, user.secret, type) + access, user.secret, + image_type) @classmethod def _fetch_image_glance_vhd(cls, session, instance_id, image, access, - type): + image_type): LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals()) @@ -358,7 +360,7 @@ class VMHelper(HelperBase): @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, - type): + image_type): """Fetch the image from Glance NOTE: @@ -378,7 +380,7 @@ class VMHelper(HelperBase): vdi_size = virtual_size LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals()) - if type == ImageType.DISK: + if image_type == ImageType.DISK: # Make room for MBR. 
vdi_size += MBR_SIZE_BYTES @@ -387,9 +389,9 @@ class VMHelper(HelperBase): with_vdi_attached_here(session, vdi, False, lambda dev: - _stream_disk(dev, type, + _stream_disk(dev, image_type, virtual_size, image_file)) - if (type == ImageType.KERNEL_RAMDISK): + if image_type == ImageType.KERNEL_RAMDISK: #we need to invoke a plugin for copying VDI's #content into proper path LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi) @@ -462,29 +464,33 @@ class VMHelper(HelperBase): return image_type @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, type): - if type == ImageType.DISK_VHD: + def _fetch_image_glance(cls, session, instance_id, image, access, + image_type): + if image_type == ImageType.DISK_VHD: return cls._fetch_image_glance_vhd( - session, instance_id, image, access, type) + session, instance_id, image, access, image_type) else: return cls._fetch_image_glance_disk( - session, instance_id, image, access, type) + session, instance_id, image, access, image_type) @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, - secret, type): + secret, image_type): url = images.image_url(image) LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) - fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' + if image_type == ImageType.KERNEL_RAMDISK: + fn = 'get_kernel' + else: + fn = 'get_vdi' args = {} args['src_url'] = url args['username'] = access args['password'] = secret args['add_partition'] = 'false' args['raw'] = 'false' - if type != ImageType.KERNEL_RAMDISK: + if image_type != ImageType.KERNEL_RAMDISK: args['add_partition'] = 'true' - if type == ImageType.DISK_RAW: + if image_type == ImageType.DISK_RAW: args['raw'] = 'true' task = session.async_call_plugin('objectstore', fn, args) uuid = session.wait_for_task(instance_id, task) @@ -857,9 +863,9 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) -def _stream_disk(dev, type, virtual_size, image_file): +def _stream_disk(dev, image_type, virtual_size, image_file): offset = 0 - if type == ImageType.DISK: + if image_type == ImageType.DISK: offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) From 05d135be0c0d8a90a97d62005a101345964800cf Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 18 Feb 2011 22:50:13 +0000 Subject: [PATCH 32/39] Typo fix --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 3b5cedda7562..097670b588d9 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -215,7 +215,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port): 'x-image-meta-property-kernel-id': 'nokernel', 'x-image-meta-property-ramdisk-id': 'noramdisk', 'x-image-meta-property-container-format': 'tarball', - 'transfer-encoding': "chunked", + 'transfer-encoding': 'chunked', 'content-type': 'application/octet-stream', } for header, value in headers.iteritems(): From 8cf6a0c01ee39066f17a11d5e9313c2828a59634 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 25 Feb 2011 01:50:18 +0000 Subject: [PATCH 33/39] No longer users image/ directory in tarball --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance 
b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index e229a9358eab..bcdf34413bda 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -163,16 +163,14 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack): "VHD %(path)s is marked as hidden without child" % locals()) - image_path = os.path.join(staging_path, 'image') - - orig_base_copy_path = os.path.join(image_path, 'image.vhd') + orig_base_copy_path = os.path.join(staging_path, 'image.vhd') if not os.path.exists(orig_base_copy_path): raise Exception("Invalid image: image.vhd not present") base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path) vdi_uuid = base_copy_uuid - orig_snap_path = os.path.join(image_path, 'snap.vhd') + orig_snap_path = os.path.join(staging_path, 'snap.vhd') if os.path.exists(orig_snap_path): snap_path, snap_uuid = rename_with_uuid(orig_snap_path) vdi_uuid = snap_uuid @@ -192,11 +190,9 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): """Hard-link VHDs into staging area with appropriate filename ('snap' or 'image.vhd') """ - image_path = os.path.join(staging_path, 'image') - os.mkdir(image_path) for name, uuid in vdi_uuids.items(): source = os.path.join(sr_path, "%s.vhd" % uuid) - link_name = os.path.join(image_path, "%s.vhd" % name) + link_name = os.path.join(staging_path, "%s.vhd" % name) os.link(source, link_name) From ec9ede003c839248ca9593c03160a23ff8ec0db1 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 25 Feb 2011 02:26:46 +0000 Subject: [PATCH 34/39] Adding _make_subprocess function --- .../xenapi/etc/xapi.d/plugins/glance | 57 ++++++++++--------- 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index bcdf34413bda..869d46e70c84 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -75,17 +75,15 @@ def _download_tarball(sr_path, staging_path, image_id, glance_host, elif resp.status != httplib.OK: raise Exception("Unexpected response from Glance %i" % res.status) - tar_args = shlex.split( - "tar -zx --directory=%(staging_path)s" % locals()) - tar_proc = subprocess.Popen( - tar_args, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + tar_cmd = "tar -zx --directory=%(staging_path)s" % locals() + tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True) chunk = resp.read(CHUNK_SIZE) while chunk: tar_proc.stdin.write(chunk) chunk = resp.read(CHUNK_SIZE) - _assert_process_success(tar_proc, "tar") + _finish_subprocess(tar_proc, "tar") conn.close() @@ -130,12 +128,10 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack): This needs to be done before we move both VHDs into the SR to prevent the base_copy from being DOA (deleted-on-arrival). """ - modify_args = shlex.split( - "vhd-util modify -n %(child_path)s -p %(parent_path)s" - % locals()) - modify_proc = subprocess.Popen( - modify_args, stderr=subprocess.PIPE) - _assert_process_success(modify_proc, "vhd-util") + modify_cmd = ("vhd-util modify -n %(child_path)s -p %(parent_path)s" + % locals()) + modify_proc = _make_subprocess(modify_cmd, stderr=True) + _finish_subprocess(modify_proc, "vhd-util") def move_into_sr(orig_path): """Move a file into the SR""" @@ -150,11 +146,9 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack): present, then the image.vhd better not be marked 'hidden' or it will be deleted when moved into the SR. 
""" - vhd_query_args = shlex.split( - "vhd-util query -n %(path)s -f" % locals()) - vhd_query_proc = subprocess.Popen( - vhd_query_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = _assert_process_success(vhd_query_proc, "vhd-util") + query_cmd = "vhd-util query -n %(path)s -f" % locals() + query_proc = _make_subprocess(query_cmd, stdout=True, stderr=True) + out, err = _finish_subprocess(query_proc, "vhd-util") for line in out.splitlines(): if line.startswith('hidden'): value = line.split(':')[1].strip() @@ -208,25 +202,24 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port): # FIXME(sirp): nokernel signals Nova to use a raw image. This is defined by # FLAGS.null_kernel. Is there a way to get rid of this? + # TODO(sirp): make `store` configurable headers = { - 'x-image-meta-store': 'file', + 'content-type': 'application/octet-stream', + 'transfer-encoding': 'chunked', 'x-image-meta-is_public': 'True', - 'x-image-meta-type': 'vhd', 'x-image-meta-status': 'queued', + 'x-image-meta-store': 'file', + 'x-image-meta-type': 'vhd', 'x-image-meta-property-kernel-id': 'nokernel', 'x-image-meta-property-ramdisk-id': 'noramdisk', 'x-image-meta-property-container-format': 'tarball', - 'transfer-encoding': 'chunked', - 'content-type': 'application/octet-stream', } for header, value in headers.iteritems(): conn.putheader(header, value) conn.endheaders() - tar_args = shlex.split( - "tar -zc --directory=%(staging_path)s ." % locals()) - tar_proc = subprocess.Popen( - tar_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + tar_cmd = "tar -zc --directory=%(staging_path)s ." % locals() + tar_proc = _make_subprocess(tar_cmd, stdout=True, stderr=True) chunk = tar_proc.stdout.read(CHUNK_SIZE) while chunk: @@ -234,7 +227,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port): chunk = tar_proc.stdout.read(CHUNK_SIZE) conn.send("0\r\n\r\n") - _assert_process_success(tar_proc, "tar") + _finish_subprocess(tar_proc, "tar") resp = conn.getresponse() if resp.status != httplib.OK: @@ -289,7 +282,19 @@ def _cleanup_staging_area(staging_path): shutil.rmtree(staging_path) -def _assert_process_success(proc, cmd): +def _make_subprocess(cmdline, stdout=False, stderr=False, stdin=False): + """Make a subprocess according to the given command-line string + """ + kwargs = {} + kwargs['stdout'] = stdout and subprocess.PIPE or None + kwargs['stderr'] = stderr and subprocess.PIPE or None + kwargs['stdin'] = stdin and subprocess.PIPE or None + args = shlex.split(cmdline) + proc = subprocess.Popen(args, **kwargs) + return proc + + +def _finish_subprocess(proc, cmd): """Ensure that the process returned a zero exit code indicating success """ out, err = proc.communicate() From e3d6dc70a6b77d80afcf87473bc79549540ac4ce Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 25 Feb 2011 02:51:14 +0000 Subject: [PATCH 35/39] Removing unecessary nokernel stuff --- nova/api/openstack/servers.py | 51 +++++++++++-------- .../xenapi/etc/xapi.d/plugins/glance | 4 -- 2 files changed, 29 insertions(+), 26 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index c8c94f1fd398..f51da0cdd918 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -136,28 +136,6 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() - def _get_kernel_ramdisk_from_image(self, req, image_id): - """ - Machine images are associated with Kernels and Ramdisk images via - metadata stored in Glance as 
'image_properties' - """ - # FIXME(sirp): Currently Nova requires us to specify the `null_kernel` - # identifier ('nokernel') to indicate a RAW (or VHD) image. It would - # be better if we could omit the kernel_id and ramdisk_id properties - # on the image - def lookup(image, param): - _image_id = image['id'] - try: - return image['properties'][param] - except KeyError: - LOG.debug( - _("%(param)s property not found for image %(_image_id)s") % - locals()) - return None - - image = self._image_service.show(req.environ['nova.context'], image_id) - return lookup(image, 'kernel_id'), lookup(image, 'ramdisk_id') - def create(self, req): """ Creates a new server for a given user """ env = self._deserialize(req.body, req) @@ -367,3 +345,32 @@ class Controller(wsgi.Controller): action=item.action, error=item.error)) return dict(actions=actions) + + def _get_kernel_ramdisk_from_image(self, req, image_id): + """Retrevies kernel and ramdisk IDs from Glance + + Only 'machine' (ami) type use kernel and ramdisk outside of the + image. + """ + # FIXME(sirp): Since we're retrieving the kernel_id from an + # image_property, this means only Glance is supported. + # The BaseImageService needs to expose a consistent way of accessing + # kernel_id and ramdisk_id + image = self._image_service.show(req.environ['nova.context'], image_id) + + if image['type'] != 'machine': + return None, None + + try: + kernel_id = image['properties']['kernel_id'] + except KeyError: + raise exception.NotFound( + _("Kernel not found for image %(image_id)s") % locals()) + + try: + ramdisk_id = image['properties']['ramdisk_id'] + except KeyError: + raise exception.NotFound( + _("Ramdisk not found for image %(image_id)s") % locals()) + + return kernel_id, ramdisk_id diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 869d46e70c84..18e4866f7898 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -200,8 +200,6 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port): # to request conn.putrequest('PUT', '/images/%s' % image_id) - # FIXME(sirp): nokernel signals Nova to use a raw image. This is defined by - # FLAGS.null_kernel. Is there a way to get rid of this? 
     # TODO(sirp): make `store` configurable
     headers = {
         'content-type': 'application/octet-stream',
@@ -210,8 +208,6 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port):
         'x-image-meta-status': 'queued',
         'x-image-meta-store': 'file',
         'x-image-meta-type': 'vhd',
-        'x-image-meta-property-kernel-id': 'nokernel',
-        'x-image-meta-property-ramdisk-id': 'noramdisk',
         'x-image-meta-property-container-format': 'tarball',
     }
     for header, value in headers.iteritems():

From d27197ae53f3282280198d9bfe7f37a059fa8a35 Mon Sep 17 00:00:00 2001
From: Rick Harris
Date: Fri, 25 Feb 2011 03:16:04 +0000
Subject: [PATCH 36/39] Removing unnecessary headers

---
 .../xenapi/etc/xapi.d/plugins/glance | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 18e4866f7898..7531af4ec72f 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -83,7 +83,7 @@ def _download_tarball(sr_path, staging_path, image_id, glance_host,
         tar_proc.stdin.write(chunk)
         chunk = resp.read(CHUNK_SIZE)
 
-    _finish_subprocess(tar_proc, "tar")
+    _finish_subprocess(tar_proc, tar_cmd)
     conn.close()
 
@@ -131,7 +131,7 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack):
         modify_cmd = ("vhd-util modify -n %(child_path)s -p %(parent_path)s"
                       % locals())
         modify_proc = _make_subprocess(modify_cmd, stderr=True)
-        _finish_subprocess(modify_proc, "vhd-util")
+        _finish_subprocess(modify_proc, modify_cmd)
 
     def move_into_sr(orig_path):
         """Move a file into the SR"""
@@ -148,7 +148,8 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack):
         """
         query_cmd = "vhd-util query -n %(path)s -f" % locals()
        query_proc = _make_subprocess(query_cmd, stdout=True, stderr=True)
-        out, err = _finish_subprocess(query_proc, "vhd-util")
+        out, err = _finish_subprocess(query_proc, query_cmd)
+
         for line in out.splitlines():
             if line.startswith('hidden'):
                 value = line.split(':')[1].strip()
@@ -206,9 +207,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port):
         'transfer-encoding': 'chunked',
         'x-image-meta-is_public': 'True',
         'x-image-meta-status': 'queued',
-        'x-image-meta-store': 'file',
-        'x-image-meta-type': 'vhd',
-        'x-image-meta-property-container-format': 'tarball',
+        'x-image-meta-type': 'vhd'
     }
     for header, value in headers.iteritems():
         conn.putheader(header, value)
@@ -223,7 +222,7 @@
         chunk = tar_proc.stdout.read(CHUNK_SIZE)
     conn.send("0\r\n\r\n")
 
-    _finish_subprocess(tar_proc, "tar")
+    _finish_subprocess(tar_proc, tar_cmd)
 
     resp = conn.getresponse()
     if resp.status != httplib.OK:
@@ -290,14 +289,14 @@ def _make_subprocess(cmdline, stdout=False, stderr=False, stdin=False):
     return proc
 
 
-def _finish_subprocess(proc, cmd):
+def _finish_subprocess(proc, cmdline):
    """Ensure that the process returned a zero exit code indicating success
     """
     out, err = proc.communicate()
     ret = proc.returncode
     if ret != 0:
-        msg = "%(cmd)s returned non-zero exit code (%i): '%s'" % (ret, err)
-        raise Exception(msg)
+        raise Exception("'%(cmdline)s' returned non-zero exit code: "
+                        "retcode=%(ret)i, stderr='%(err)s'" % locals())
     return out, err

From 079b532a1080da9fe5d99e90fa9c60d16506de06 Mon Sep 17 00:00:00 2001
From: Rick Harris
Date: Fri, 25 Feb 2011 16:24:51 +0000
Subject: [PATCH 37/39] Verify status of image is active

---
 nova/api/openstack/servers.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index f51da0cdd918..bf5663f60dd2 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -358,6 +358,11 @@ class Controller(wsgi.Controller):
         # kernel_id and ramdisk_id
         image = self._image_service.show(req.environ['nova.context'], image_id)
 
+        if image['status'] != 'active':
+            raise exception.Invalid(
+                _("Cannot build from image %(image_id)s, status not active") %
+                locals())
+
         if image['type'] != 'machine':
             return None, None

From a457595224a5ca5cdb0191ba2f6fa542d16e18f5 Mon Sep 17 00:00:00 2001
From: Nirmal Ranganathan
Date: Mon, 28 Feb 2011 11:25:14 -0600
Subject: [PATCH 38/39] Removed extraneous newline

---
 nova/tests/test_compute.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index e338a7cfae31..949b5e6ebe7e 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -274,4 +274,3 @@ class ComputeTestCase(test.TestCase):
         type = instance_types.get_by_flavor_id("1")
         self.assertEqual(type, 'm1.tiny')
-

From 4572ffcf734b734870b90497063fc27e7642f67c Mon Sep 17 00:00:00 2001
From: Naveed Massjouni
Date: Mon, 28 Feb 2011 19:56:46 -0500
Subject: [PATCH 39/39] No reason to initialize metadata twice.

---
 nova/api/openstack/servers.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 7d20f681b59d..69273ad7b09f 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -72,8 +72,6 @@ def _translate_detail_keys(inst):
     public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
     inst_dict['addresses']['public'] = public_ips
 
-    inst_dict['metadata'] = {}
-
     # Return the metadata as a dictionary
     metadata = {}
     for item in inst['metadata']:
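
Editor's note: the sketch below is illustrative only and is not part of the patch series. It re-states the _make_subprocess and _finish_subprocess helpers introduced in PATCH 34 and reworked in PATCH 36, then drives them the way _upload_tarball does, reading gzipped tar output chunk by chunk. The CHUNK_SIZE value and the temporary staging directory are assumptions made for the example; the real plugin defines its own CHUNK_SIZE and receives the staging path from its caller.

    import shlex
    import subprocess
    import tempfile

    CHUNK_SIZE = 8192  # assumption: the real plugin defines its own value


    def _make_subprocess(cmdline, stdout=False, stderr=False, stdin=False):
        # Only open the pipes the caller asked for, as in PATCH 34.
        kwargs = {}
        kwargs['stdout'] = stdout and subprocess.PIPE or None
        kwargs['stderr'] = stderr and subprocess.PIPE or None
        kwargs['stdin'] = stdin and subprocess.PIPE or None
        return subprocess.Popen(shlex.split(cmdline), **kwargs)


    def _finish_subprocess(proc, cmdline):
        # PATCH 36 variant: report the full command line on failure.
        out, err = proc.communicate()
        ret = proc.returncode
        if ret != 0:
            raise Exception("'%(cmdline)s' returned non-zero exit code: "
                            "retcode=%(ret)i, stderr='%(err)s'" % locals())
        return out, err


    # Stream a gzipped tarball of an (empty) staging directory, the same way
    # _upload_tarball reads tar output chunk by chunk before sending it on.
    staging_path = tempfile.mkdtemp()  # hypothetical stand-in for the staging area
    tar_cmd = "tar -zc --directory=%(staging_path)s ." % locals()
    tar_proc = _make_subprocess(tar_cmd, stdout=True, stderr=True)

    chunk = tar_proc.stdout.read(CHUNK_SIZE)
    while chunk:
        # A real caller would forward each chunk over the chunked HTTP connection.
        chunk = tar_proc.stdout.read(CHUNK_SIZE)

    _finish_subprocess(tar_proc, tar_cmd)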
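
Editor's note: a second illustrative sketch, also not part of the patch series. It walks through the order of checks that PATCH 35 and PATCH 37 give _get_kernel_ramdisk_from_image, using hand-written stand-ins for the metadata dict the image service returns. Plain ValueError and LookupError substitute for nova's exception.Invalid and exception.NotFound, and the image IDs and properties are hypothetical.

    def get_kernel_ramdisk(image):
        # Follows the checks in the patched _get_kernel_ramdisk_from_image.
        if image['status'] != 'active':
            raise ValueError(
                "Cannot build from image %s, status not active" % image['id'])
        if image['type'] != 'machine':
            # Only 'machine' (ami) images carry separate kernel/ramdisk images.
            return None, None
        try:
            kernel_id = image['properties']['kernel_id']
        except KeyError:
            raise LookupError("Kernel not found for image %s" % image['id'])
        try:
            ramdisk_id = image['properties']['ramdisk_id']
        except KeyError:
            raise LookupError("Ramdisk not found for image %s" % image['id'])
        return kernel_id, ramdisk_id


    # Hypothetical metadata in the shape the controller reads from the service.
    ami_image = {'id': 1, 'status': 'active', 'type': 'machine',
                 'properties': {'kernel_id': 2, 'ramdisk_id': 3}}
    vhd_image = {'id': 4, 'status': 'active', 'type': 'vhd', 'properties': {}}

    assert get_kernel_ramdisk(ami_image) == (2, 3)
    assert get_kernel_ramdisk(vhd_image) == (None, None)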