Fixed after Jay's review.

Integrated code from Soren (we now use the same 'magic number' for images without a kernel & ramdisk).
Salvatore Orlando 2010-12-27 14:06:50 +00:00 committed by Tarmac
commit 0ef58bac84
9 changed files with 139 additions and 49 deletions
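
The 'magic number' mentioned above is a sentinel kernel id (FLAGS.null_kernel in the compute API hunk below) that marks a machine image as a raw disk with no separate kernel or ramdisk. A minimal sketch of the idea, assuming a sentinel value of 'nokernel' and a helper name chosen purely for illustration:

    # Illustrative sketch only: 'nokernel' stands in for whatever value
    # the null-kernel flag carries in a real deployment.
    NULL_KERNEL = 'nokernel'

    def resolve_boot_images(kernel_id, ramdisk_id):
        # A raw image carries its own bootloader, so the sentinel clears both ids.
        if kernel_id == NULL_KERNEL:
            return None, None
        return kernel_id, ramdisk_id

    print(resolve_boot_images('aki-1234', 'ari-5678'))   # ('aki-1234', 'ari-5678')
    print(resolve_boot_images(NULL_KERNEL, 'ari-5678'))  # (None, None)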

View File

@@ -27,6 +27,7 @@ Paul Voccio <paul@openstack.org>
 Rick Clark <rick@openstack.org>
 Ryan Lane <rlane@wikimedia.org>
 Ryan Lucio <rlucio@internap.com>
+Salvatore Orlando <salvatore.orlando@eu.citrix.com>
 Sandy Walsh <sandy.walsh@rackspace.com>
 Soren Hansen <soren.hansen@rackspace.com>
 Thierry Carrez <thierry@openstack.org>
@@ -35,3 +36,4 @@ Trey Morris <trey.morris@rackspace.com>
 Vishvananda Ishaya <vishvananda@gmail.com>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
 Zhixue Wu <Zhixue.Wu@citrix.com>

View File

@@ -91,15 +91,16 @@ class ComputeAPI(base.Base):
         is_vpn = image_id == FLAGS.vpn_image_id
         if not is_vpn:
             image = self.image_service.show(context, image_id)
+            # If kernel_id/ramdisk_id isn't explicitly set in API call
+            # we take the defaults from the image's metadata
             if kernel_id is None:
                 kernel_id = image.get('kernelId', None)
             if ramdisk_id is None:
                 ramdisk_id = image.get('ramdiskId', None)
-        # Make sure we have access to kernel and ramdisk
+            #No kernel and ramdisk for raw images
+            if kernel_id == str(FLAGS.null_kernel):
+                kernel_id = None
+                ramdisk_id = None
+                logging.debug("Creating a raw instance")
+        # Make sure we have access to kernel and ramdisk (if not raw)
         if kernel_id:
             self.image_service.show(context, kernel_id)
         if ramdisk_id:

View File

@@ -157,7 +157,6 @@ class LibvirtConnTestCase(test.TestCase):
             (lambda t: t.find('./devices/serial/source').get(
                 'path').split('/')[1], 'console.log'),
             (lambda t: t.find('./memory').text, '2097152')]
-
         if rescue:
             common_checks += [
                 (lambda t: t.findall('./devices/disk/source')[0].get(

View File

@@ -512,9 +512,10 @@ class LibvirtConnection(object):
         if FLAGS.allow_project_net_traffic:
             net, mask = _get_net_and_mask(network['cidr'])
-            extra_params = ("<parameter name=\"PROJNET\" value=\"%s\" />\n"
-                            "<parameter name=\"PROJMASK\" value=\"%s\" />\n"
-                           ) % (net, mask)
+            extra_params = ("<parameter name=\"PROJNET\" "
+                            "value=\"%s\" />\n"
+                            "<parameter name=\"PROJMASK\" "
+                            "value=\"%s\" />\n") % (net, mask)
         else:
             extra_params = "\n"
@@ -800,8 +801,8 @@ class NWFilterFirewall(object):
         the base filter are all in place.
         """
-        nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n"
-                       ) % instance['name']
+        nwfilter_xml = ("<filter name='nova-instance-%s' "
+                        "chain='root'>\n") % instance['name']
         if instance['image_id'] == FLAGS.vpn_image_id:
             nwfilter_xml += " <filterref filter='nova-vpn' />\n"
@@ -814,8 +815,8 @@ class NWFilterFirewall(object):
         for security_group in instance.security_groups:
             self.ensure_security_group_filter(security_group['id'])
-            nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n"
-                            ) % security_group['id']
+            nwfilter_xml += (" <filterref filter='nova-secgroup-%d' "
+                             "/>\n") % security_group['id']
         nwfilter_xml += "</filter>"
         self._define_filter(nwfilter_xml)

View File

@@ -235,6 +235,7 @@ class SessionBase(object):
         elif '.' in name:
             impl = getattr(self, name.replace('.', '_'))
             if impl is not None:
+
                 def callit(*params):
                     logging.warn('Calling %s %s', name, impl)
                     self._check_session(params)

View File

@@ -43,15 +43,31 @@ XENAPI_POWER_STATE = {
                       'Crashed': power_state.CRASHED}
+
+
+class ImageType:
+    """
+    Enumeration class for distinguishing different image types
+        0 - kernel/ramdisk image (goes on dom0's filesystem)
+        1 - disk image (local SR, partitioned by objectstore plugin)
+        2 - raw disk image (local SR, NOT partitioned by plugin)
+    """
+    KERNEL_RAMDISK = 0
+    DISK = 1
+    DISK_RAW = 2
+
+
 class VMHelper(HelperBase):
     """
     The class that wraps the helper methods together.
     """
     @classmethod
-    def create_vm(cls, session, instance, kernel, ramdisk):
+    def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
         """Create a VM record.  Returns a Deferred that gives the new
-        VM reference."""
+        VM reference.
+        the pv_kernel flag indicates whether the guest is HVM or PV
+        """
         instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
         mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
         vcpus = str(instance_type['vcpus'])
@@ -70,9 +86,9 @@ class VMHelper(HelperBase):
             'actions_after_reboot': 'restart',
             'actions_after_crash': 'destroy',
             'PV_bootloader': '',
-            'PV_kernel': kernel,
-            'PV_ramdisk': ramdisk,
-            'PV_args': 'root=/dev/xvda1',
+            'PV_kernel': '',
+            'PV_ramdisk': '',
+            'PV_args': '',
             'PV_bootloader_args': '',
             'PV_legacy_args': '',
             'HVM_boot_policy': '',
@@ -84,7 +100,25 @@ class VMHelper(HelperBase):
             'user_version': '0',
             'other_config': {},
             }
-        logging.debug(_('Created VM %s...'), instance.name)
+        #Complete VM configuration record according to the image type
+        #non-raw/raw with PV kernel/raw in HVM mode
+        if instance.kernel_id:
+            rec['PV_bootloader'] = ''
+            rec['PV_kernel'] = kernel
+            rec['PV_ramdisk'] = ramdisk
+            rec['PV_args'] = 'root=/dev/xvda1'
+            rec['PV_bootloader_args'] = ''
+            rec['PV_legacy_args'] = ''
+        else:
+            if pv_kernel:
+                rec['PV_args'] = 'noninteractive'
+                rec['PV_bootloader'] = 'pygrub'
+            else:
+                rec['HVM_boot_policy'] = 'BIOS order'
+                rec['HVM_boot_params'] = {'order': 'dc'}
+                rec['platform'] = {'acpi': 'true', 'apic': 'true',
+                                   'pae': 'true', 'viridian': 'true'}
+        logging.debug('Created VM %s...', instance.name)
         vm_ref = session.call_xenapi('VM.create', rec)
         logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
         return vm_ref
@@ -170,28 +204,46 @@ class VMHelper(HelperBase):
         return vif_ref

     @classmethod
-    def fetch_image(cls, session, image, user, project, use_sr):
-        """use_sr: True to put the image as a VDI in an SR, False to place
-        it on dom0's filesystem.  The former is for VM disks, the latter for
-        its kernel and ramdisk (if external kernels are being used).
-        Returns a Deferred that gives the new VDI UUID."""
+    def fetch_image(cls, session, image, user, project, type):
+        """
+        type is interpreted as an ImageType instance
+        """
         url = images.image_url(image)
         access = AuthManager().get_access_key(user, project)
-        logging.debug(_("Asking xapi to fetch %s as %s"), url, access)
-        fn = use_sr and 'get_vdi' or 'get_kernel'
+        logging.debug("Asking xapi to fetch %s as %s", url, access)
+        fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
         args = {}
         args['src_url'] = url
         args['username'] = access
         args['password'] = user.secret
-        if use_sr:
+        args['add_partition'] = 'false'
+        args['raw'] = 'false'
+        if type != ImageType.KERNEL_RAMDISK:
             args['add_partition'] = 'true'
+            if type == ImageType.DISK_RAW:
+                args['raw'] = 'true'
         task = session.async_call_plugin('objectstore', fn, args)
         #FIXME(armando): find a solution to missing instance_id
         #with Josh Kearney
         uuid = session.wait_for_task(0, task)
         return uuid

+    @classmethod
+    def lookup_image(cls, session, vdi_ref):
+        logging.debug("Looking up vdi %s for PV kernel", vdi_ref)
+        fn = "is_vdi_pv"
+        args = {}
+        args['vdi-ref'] = vdi_ref
+        #TODO: Call proper function in plugin
+        task = session.async_call_plugin('objectstore', fn, args)
+        pv_str = session.wait_for_task(task)
+        if pv_str.lower() == 'true':
+            pv = True
+        elif pv_str.lower() == 'false':
+            pv = False
+        logging.debug("PV Kernel in VDI:%d", pv)
+        return pv
+
     @classmethod
     def lookup(cls, session, i):
         """Look the instance i up, and returns it if available"""

View File

@@ -29,6 +29,7 @@ from nova.auth.manager import AuthManager
 from nova.compute import power_state
 from nova.virt.xenapi.network_utils import NetworkHelper
 from nova.virt.xenapi.vm_utils import VMHelper
+from nova.virt.xenapi.vm_utils import ImageType


 class VMOps(object):
@@ -64,16 +65,30 @@ class VMOps(object):
         user = AuthManager().get_user(instance.user_id)
         project = AuthManager().get_project(instance.project_id)
-        vdi_uuid = VMHelper.fetch_image(
-            self._session, instance.image_id, user, project, True)
-        kernel = VMHelper.fetch_image(
-            self._session, instance.kernel_id, user, project, False)
-        ramdisk = VMHelper.fetch_image(
-            self._session, instance.ramdisk_id, user, project, False)
+        #if kernel is not present we must download a raw disk
+        if instance.kernel_id:
+            disk_image_type = ImageType.DISK
+        else:
+            disk_image_type = ImageType.DISK_RAW
+        vdi_uuid = VMHelper.fetch_image(self._session,
+                instance.image_id, user, project, disk_image_type)
         vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
-        vm_ref = VMHelper.create_vm(
-            self._session, instance, kernel, ramdisk)
+        #Have a look at the VDI and see if it has a PV kernel
+        pv_kernel = False
+        if not instance.kernel_id:
+            pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
+        kernel = None
+        if instance.kernel_id:
+            kernel = VMHelper.fetch_image(self._session,
+                instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+        ramdisk = None
+        if instance.ramdisk_id:
+            ramdisk = VMHelper.fetch_image(self._session,
+                instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
+        vm_ref = VMHelper.create_vm(self._session,
+                instance, kernel, ramdisk, pv_kernel)
         VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
         if network_ref:
             VMHelper.create_vif(self._session, vm_ref,
                                 network_ref, instance.mac_address)

View File

@@ -60,13 +60,11 @@ class VolumeHelper(HelperBase):
                       'port': info['targetPort'],
                       'targetIQN': info['targetIQN'],
                       'chapuser': info['chapuser'],
-                      'chappassword': info['chappassword']
-                      }
+                      'chappassword': info['chappassword']}
         else:
             record = {'target': info['targetHost'],
                       'port': info['targetPort'],
-                      'targetIQN': info['targetIQN']
-                      }
+                      'targetIQN': info['targetIQN']}
         try:
             sr_ref = session.get_xenapi().SR.create(
                 session.get_xenapi_host(),

View File

@@ -43,24 +43,43 @@ SECTOR_SIZE = 512
 MBR_SIZE_SECTORS = 63
 MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
+
+
+def is_vdi_pv(session,args):
+    logging.debug("Checking wheter VDI has PV kernel")
+    vdi = exists(args, 'vdi-ref')
+    pv=with_vdi_in_dom0(session, vdi, False,
+                        lambda dev: _is_vdi_pv('/dev/%s' % dev))
+    if pv:
+        return 'true'
+    else:
+        return 'false'
+
+
+def _is_vdi_pv(dest):
+    logging.debug("Running pygrub against %s",dest)
+    output=os.popen('pygrub -qn %s' % dest)
+    pv=False
+    for line in output.readlines():
+        #try to find kernel string
+        m=re.search('(?<=kernel:)/.*(?:>)',line)
+        if m:
+            if m.group(0).find('xen')!=-1:
+                pv=True
+    logging.debug("PV:%d",pv)
+    return pv
+
+
 def get_vdi(session, args):
     src_url = exists(args, 'src_url')
     username = exists(args, 'username')
     password = exists(args, 'password')
+    raw_image=validate_bool(args, 'raw', 'false')
     add_partition = validate_bool(args, 'add_partition', 'false')
     (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
     sr = find_sr(session)
     if sr is None:
         raise Exception('Cannot find SR to write VDI to')
     virtual_size = \
         get_content_length(proto, netloc, url_path, username, password)
     if virtual_size < 0:
         raise Exception('Cannot get VDI size')
     vdi_size = virtual_size
     if add_partition:
         # Make room for MBR.
@@ -69,18 +88,19 @@ def get_vdi(session, args):
     vdi = create_vdi(session, sr, src_url, vdi_size, False)
     with_vdi_in_dom0(session, vdi, False,
                      lambda dev: get_vdi_(proto, netloc, url_path,
-                                          username, password, add_partition,
+                                          username, password, add_partition,raw_image,
                                           virtual_size, '/dev/%s' % dev))
     return session.xenapi.VDI.get_uuid(vdi)


-def get_vdi_(proto, netloc, url_path, username, password, add_partition,
+def get_vdi_(proto, netloc, url_path, username, password, add_partition,raw_image,
              virtual_size, dest):

-    if add_partition:
+    #Salvatore: vdi should not be partitioned for raw images
+    if (add_partition and not raw_image):
         write_partition(virtual_size, dest)

-    offset = add_partition and MBR_SIZE_BYTES or 0
+    offset = (add_partition and not raw_image and MBR_SIZE_BYTES) or 0
     get(proto, netloc, url_path, username, password, dest, offset)
@@ -228,4 +248,5 @@ def download_all(response, length, dest_file, offset):
 if __name__ == '__main__':
     XenAPIPlugin.dispatch({'get_vdi': get_vdi,
-                           'get_kernel': get_kernel})
+                           'get_kernel': get_kernel,
+                           'is_vdi_pv': is_vdi_pv})