Fixed after Jay's review.
Integrated code from Soren (we now use the same 'magic number' for images without kernel & ramdisk).
This commit is contained in:
commit 0ef58bac84
2 Authors
@@ -27,6 +27,7 @@ Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
Ryan Lane <rlane@wikimedia.org>
Ryan Lucio <rlucio@internap.com>
Salvatore Orlando <salvatore.orlando@eu.citrix.com>
Sandy Walsh <sandy.walsh@rackspace.com>
Soren Hansen <soren.hansen@rackspace.com>
Thierry Carrez <thierry@openstack.org>
@@ -35,3 +36,4 @@ Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
@@ -91,15 +91,16 @@ class ComputeAPI(base.Base):
        is_vpn = image_id == FLAGS.vpn_image_id
        if not is_vpn:
            image = self.image_service.show(context, image_id)

            # If kernel_id/ramdisk_id isn't explicitly set in API call
            # we take the defaults from the image's metadata
            if kernel_id is None:
                kernel_id = image.get('kernelId', None)
            if ramdisk_id is None:
                ramdisk_id = image.get('ramdiskId', None)

            # Make sure we have access to kernel and ramdisk
            # No kernel and ramdisk for raw images
            if kernel_id == str(FLAGS.null_kernel):
                kernel_id = None
                ramdisk_id = None
                logging.debug("Creating a raw instance")
            # Make sure we have access to kernel and ramdisk (if not raw)
            if kernel_id:
                self.image_service.show(context, kernel_id)
            if ramdisk_id:
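
A note on the 'magic number' from the commit message: an image registered without a kernel/ramdisk carries a sentinel kernel id (FLAGS.null_kernel), which the hunk above normalizes to None. A minimal sketch of the idea; the flag's actual default value is not shown in this diff, so 'nokernel' below is an assumption:

    # Sketch only, not the nova module; NULL_KERNEL stands in for
    # FLAGS.null_kernel, whose real default is not shown in this diff.
    NULL_KERNEL = 'nokernel'

    def normalize_ids(kernel_id, ramdisk_id):
        """Treat the sentinel kernel id as 'raw image, no external kernel'."""
        if kernel_id == str(NULL_KERNEL):
            return None, None
        return kernel_id, ramdisk_id

    assert normalize_ids('nokernel', 'ari-123') == (None, None)
    assert normalize_ids('aki-456', 'ari-123') == ('aki-456', 'ari-123')
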
@@ -157,7 +157,6 @@ class LibvirtConnTestCase(test.TestCase):
            (lambda t: t.find('./devices/serial/source').get(
                'path').split('/')[1], 'console.log'),
            (lambda t: t.find('./memory').text, '2097152')]

        if rescue:
            common_checks += [
                (lambda t: t.findall('./devices/disk/source')[0].get(
@@ -512,9 +512,10 @@ class LibvirtConnection(object):

        if FLAGS.allow_project_net_traffic:
            net, mask = _get_net_and_mask(network['cidr'])
            extra_params = ("<parameter name=\"PROJNET\" value=\"%s\" />\n"
                            "<parameter name=\"PROJMASK\" value=\"%s\" />\n"
                            ) % (net, mask)
            extra_params = ("<parameter name=\"PROJNET\" "
                            "value=\"%s\" />\n"
                            "<parameter name=\"PROJMASK\" "
                            "value=\"%s\" />\n") % (net, mask)
        else:
            extra_params = "\n"
@@ -800,8 +801,8 @@ class NWFilterFirewall(object):
        the base filter are all in place.
        """

        nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n"
                        ) % instance['name']
        nwfilter_xml = ("<filter name='nova-instance-%s' "
                        "chain='root'>\n") % instance['name']

        if instance['image_id'] == FLAGS.vpn_image_id:
            nwfilter_xml += " <filterref filter='nova-vpn' />\n"
@@ -814,8 +815,8 @@ class NWFilterFirewall(object):
        for security_group in instance.security_groups:
            self.ensure_security_group_filter(security_group['id'])

            nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n"
                             ) % security_group['id']
            nwfilter_xml += (" <filterref filter='nova-secgroup-%d' "
                             "/>\n") % security_group['id']
        nwfilter_xml += "</filter>"

        self._define_filter(nwfilter_xml)
@@ -235,6 +235,7 @@ class SessionBase(object):
        elif '.' in name:
            impl = getattr(self, name.replace('.', '_'))
        if impl is not None:

            def callit(*params):
                logging.warn('Calling %s %s', name, impl)
                self._check_session(params)
@@ -43,15 +43,31 @@ XENAPI_POWER_STATE = {
    'Crashed': power_state.CRASHED}


class ImageType:
    """
    Enumeration class for distinguishing different image types
        0 - kernel/ramdisk image (goes on dom0's filesystem)
        1 - disk image (local SR, partitioned by objectstore plugin)
        2 - raw disk image (local SR, NOT partitioned by plugin)
    """

    KERNEL_RAMDISK = 0
    DISK = 1
    DISK_RAW = 2


class VMHelper(HelperBase):
    """
    The class that wraps the helper methods together.
    """

    @classmethod
    def create_vm(cls, session, instance, kernel, ramdisk):
    def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
        """Create a VM record. Returns a Deferred that gives the new
        VM reference."""
        VM reference.
        the pv_kernel flag indicates whether the guest is HVM or PV
        """

        instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
        mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
        vcpus = str(instance_type['vcpus'])
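
The ImageType values drive both where the fetched bits land and how the VM later boots; a small illustrative sketch of choosing a type (the helper name is invented, mirroring the branch added to VMOps.spawn later in this commit):

    # Invented helper for illustration only.
    def choose_disk_image_type(kernel_id):
        # An external kernel means a partitioned disk image; no kernel id
        # means the image is a raw disk, stored unpartitioned in the SR.
        return ImageType.DISK if kernel_id else ImageType.DISK_RAW
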
@@ -70,9 +86,9 @@ class VMHelper(HelperBase):
            'actions_after_reboot': 'restart',
            'actions_after_crash': 'destroy',
            'PV_bootloader': '',
            'PV_kernel': kernel,
            'PV_ramdisk': ramdisk,
            'PV_args': 'root=/dev/xvda1',
            'PV_kernel': '',
            'PV_ramdisk': '',
            'PV_args': '',
            'PV_bootloader_args': '',
            'PV_legacy_args': '',
            'HVM_boot_policy': '',
@@ -84,7 +100,25 @@ class VMHelper(HelperBase):
            'user_version': '0',
            'other_config': {},
            }
        logging.debug(_('Created VM %s...'), instance.name)
        # Complete VM configuration record according to the image type
        # non-raw/raw with PV kernel/raw in HVM mode
        if instance.kernel_id:
            rec['PV_bootloader'] = ''
            rec['PV_kernel'] = kernel
            rec['PV_ramdisk'] = ramdisk
            rec['PV_args'] = 'root=/dev/xvda1'
            rec['PV_bootloader_args'] = ''
            rec['PV_legacy_args'] = ''
        else:
            if pv_kernel:
                rec['PV_args'] = 'noninteractive'
                rec['PV_bootloader'] = 'pygrub'
            else:
                rec['HVM_boot_policy'] = 'BIOS order'
                rec['HVM_boot_params'] = {'order': 'dc'}
                rec['platform'] = {'acpi': 'true', 'apic': 'true',
                                   'pae': 'true', 'viridian': 'true'}
        logging.debug('Created VM %s...', instance.name)
        vm_ref = session.call_xenapi('VM.create', rec)
        logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
        return vm_ref
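
The new branch encodes a three-way boot decision: an external kernel gives classic PV boot, a PV kernel found inside a raw image boots via pygrub, and anything else boots as HVM. A standalone sketch of the same table (hypothetical helper; the field values are copied from the hunk above):

    # Hypothetical restatement of the branch in create_vm above.
    def boot_record(kernel, ramdisk, has_external_kernel, pv_kernel):
        rec = {}
        if has_external_kernel:
            rec['PV_kernel'] = kernel
            rec['PV_ramdisk'] = ramdisk
            rec['PV_args'] = 'root=/dev/xvda1'
        elif pv_kernel:
            rec['PV_bootloader'] = 'pygrub'
            rec['PV_args'] = 'noninteractive'
        else:
            rec['HVM_boot_policy'] = 'BIOS order'
            rec['HVM_boot_params'] = {'order': 'dc'}
            rec['platform'] = {'acpi': 'true', 'apic': 'true',
                               'pae': 'true', 'viridian': 'true'}
        return rec
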
@@ -170,28 +204,46 @@ class VMHelper(HelperBase):
        return vif_ref

    @classmethod
    def fetch_image(cls, session, image, user, project, use_sr):
        """use_sr: True to put the image as a VDI in an SR, False to place
        it on dom0's filesystem. The former is for VM disks, the latter for
        its kernel and ramdisk (if external kernels are being used).
        Returns a Deferred that gives the new VDI UUID."""

    def fetch_image(cls, session, image, user, project, type):
        """
        type is interpreted as an ImageType instance
        """
        url = images.image_url(image)
        access = AuthManager().get_access_key(user, project)
        logging.debug(_("Asking xapi to fetch %s as %s"), url, access)
        fn = use_sr and 'get_vdi' or 'get_kernel'
        logging.debug("Asking xapi to fetch %s as %s", url, access)
        fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
        args = {}
        args['src_url'] = url
        args['username'] = access
        args['password'] = user.secret
        if use_sr:
            args['add_partition'] = 'false'
            args['raw'] = 'false'
        if type != ImageType.KERNEL_RAMDISK:
            args['add_partition'] = 'true'
            if type == ImageType.DISK_RAW:
                args['raw'] = 'true'
        task = session.async_call_plugin('objectstore', fn, args)
        #FIXME(armando): find a solution to missing instance_id
        #with Josh Kearney
        uuid = session.wait_for_task(0, task)
        return uuid

    @classmethod
    def lookup_image(cls, session, vdi_ref):
        logging.debug("Looking up vdi %s for PV kernel", vdi_ref)
        fn = "is_vdi_pv"
        args = {}
        args['vdi-ref'] = vdi_ref
        #TODO: Call proper function in plugin
        task = session.async_call_plugin('objectstore', fn, args)
        pv_str = session.wait_for_task(task)
        if pv_str.lower() == 'true':
            pv = True
        elif pv_str.lower() == 'false':
            pv = False
        logging.debug("PV Kernel in VDI:%d", pv)
        return pv

    @classmethod
    def lookup(cls, session, i):
        """Look the instance i up, and returns it if available"""
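
Concretely, the new fetch_image maps each ImageType onto a plugin function and argument set. A self-contained sketch reproducing just that dispatch (illustration only, derived from the method above; the constants mirror ImageType):

    KERNEL_RAMDISK, DISK, DISK_RAW = 0, 1, 2

    def plugin_call_for(image_type):
        # KERNEL_RAMDISK -> 'get_kernel' (dom0 filesystem), no extra args
        # DISK           -> 'get_vdi', add_partition='true'
        # DISK_RAW       -> 'get_vdi', add_partition='true', raw='true'
        fn = 'get_kernel' if image_type == KERNEL_RAMDISK else 'get_vdi'
        args = {}
        if image_type != KERNEL_RAMDISK:
            args['add_partition'] = 'true'
            if image_type == DISK_RAW:
                args['raw'] = 'true'
        return fn, args

    assert plugin_call_for(KERNEL_RAMDISK) == ('get_kernel', {})
    assert plugin_call_for(DISK_RAW) == ('get_vdi',
                                         {'add_partition': 'true',
                                          'raw': 'true'})
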
@@ -29,6 +29,7 @@ from nova.auth.manager import AuthManager
from nova.compute import power_state
from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.vm_utils import ImageType


class VMOps(object):
@@ -64,16 +65,30 @@ class VMOps(object):

        user = AuthManager().get_user(instance.user_id)
        project = AuthManager().get_project(instance.project_id)
        vdi_uuid = VMHelper.fetch_image(
            self._session, instance.image_id, user, project, True)
        kernel = VMHelper.fetch_image(
            self._session, instance.kernel_id, user, project, False)
        ramdisk = VMHelper.fetch_image(
            self._session, instance.ramdisk_id, user, project, False)
        # if kernel is not present we must download a raw disk
        if instance.kernel_id:
            disk_image_type = ImageType.DISK
        else:
            disk_image_type = ImageType.DISK_RAW
        vdi_uuid = VMHelper.fetch_image(self._session,
            instance.image_id, user, project, disk_image_type)
        vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
        vm_ref = VMHelper.create_vm(
            self._session, instance, kernel, ramdisk)
        # Have a look at the VDI and see if it has a PV kernel
        pv_kernel = False
        if not instance.kernel_id:
            pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
        kernel = None
        if instance.kernel_id:
            kernel = VMHelper.fetch_image(self._session,
                instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
        ramdisk = None
        if instance.ramdisk_id:
            ramdisk = VMHelper.fetch_image(self._session,
                instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
        vm_ref = VMHelper.create_vm(self._session,
            instance, kernel, ramdisk, pv_kernel)
        VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)

        if network_ref:
            VMHelper.create_vif(self._session, vm_ref,
                                network_ref, instance.mac_address)
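
For orientation, the raw-image path through the new spawn reads roughly as follows (a condensed trace using names from this commit; 'session' and 'instance' are stand-ins):

    # With no instance.kernel_id:
    #   1. disk_image_type = ImageType.DISK_RAW
    #   2. vdi_uuid = VMHelper.fetch_image(...)   # unpartitioned VDI in the SR
    #   3. pv_kernel = VMHelper.lookup_image(session, vdi_ref)  # pygrub probe
    #   4. VMHelper.create_vm(session, instance, None, None, pv_kernel)
    #      -> PV via pygrub if pv_kernel is True, otherwise HVM 'BIOS order'
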
@@ -60,13 +60,11 @@ class VolumeHelper(HelperBase):
                      'port': info['targetPort'],
                      'targetIQN': info['targetIQN'],
                      'chapuser': info['chapuser'],
                      'chappassword': info['chappassword']
                      }
                      'chappassword': info['chappassword']}
        else:
            record = {'target': info['targetHost'],
                      'port': info['targetPort'],
                      'targetIQN': info['targetIQN']
                      }
                      'targetIQN': info['targetIQN']}
        try:
            sr_ref = session.get_xenapi().SR.create(
                session.get_xenapi_host(),
@@ -43,24 +43,43 @@ SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE


def is_vdi_pv(session, args):
    logging.debug("Checking whether VDI has PV kernel")
    vdi = exists(args, 'vdi-ref')
    pv = with_vdi_in_dom0(session, vdi, False,
                          lambda dev: _is_vdi_pv('/dev/%s' % dev))
    if pv:
        return 'true'
    else:
        return 'false'


def _is_vdi_pv(dest):
    logging.debug("Running pygrub against %s", dest)
    output = os.popen('pygrub -qn %s' % dest)
    pv = False
    for line in output.readlines():
        # try to find kernel string
        m = re.search('(?<=kernel:)/.*(?:>)', line)
        if m:
            if m.group(0).find('xen') != -1:
                pv = True
    logging.debug("PV:%d", pv)
    return pv


def get_vdi(session, args):
    src_url = exists(args, 'src_url')
    username = exists(args, 'username')
    password = exists(args, 'password')
    raw_image = validate_bool(args, 'raw', 'false')
    add_partition = validate_bool(args, 'add_partition', 'false')

    (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)

    sr = find_sr(session)
    if sr is None:
        raise Exception('Cannot find SR to write VDI to')

    virtual_size = \
        get_content_length(proto, netloc, url_path, username, password)
    if virtual_size < 0:
        raise Exception('Cannot get VDI size')

    vdi_size = virtual_size
    if add_partition:
        # Make room for MBR.
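
_is_vdi_pv decides 'PV' by grepping pygrub's output for a kernel path that mentions 'xen'. A worked example of the regex; the sample line is invented for illustration and real 'pygrub -qn' output may differ:

    import re

    # Invented sample line; actual 'pygrub -qn' output format may differ.
    line = "kernel:/boot/vmlinuz-2.6.32-xen>"
    m = re.search('(?<=kernel:)/.*(?:>)', line)
    assert m is not None and 'xen' in m.group(0)  # treated as a PV kernel
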
@@ -69,18 +88,19 @@ def get_vdi(session, args):
    vdi = create_vdi(session, sr, src_url, vdi_size, False)
    with_vdi_in_dom0(session, vdi, False,
                     lambda dev: get_vdi_(proto, netloc, url_path,
                                          username, password, add_partition,
                                          username, password, add_partition, raw_image,
                                          virtual_size, '/dev/%s' % dev))
    return session.xenapi.VDI.get_uuid(vdi)


def get_vdi_(proto, netloc, url_path, username, password, add_partition,
def get_vdi_(proto, netloc, url_path, username, password, add_partition, raw_image,
             virtual_size, dest):

    if add_partition:
    # Salvatore: vdi should not be partitioned for raw images
    if (add_partition and not raw_image):
        write_partition(virtual_size, dest)

    offset = add_partition and MBR_SIZE_BYTES or 0
    offset = (add_partition and not raw_image and MBR_SIZE_BYTES) or 0
    get(proto, netloc, url_path, username, password, dest, offset)
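
The offset arithmetic: a partitioned, non-raw image is written past a 63-sector MBR gap (63 * 512 = 32256 bytes), while a raw image starts at byte 0. A self-contained check of the new expression:

    SECTOR_SIZE = 512
    MBR_SIZE_SECTORS = 63
    MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE  # 32256

    def write_offset(add_partition, raw_image):
        # Mirrors the new expression in get_vdi_ above.
        return (add_partition and not raw_image and MBR_SIZE_BYTES) or 0

    assert write_offset(True, False) == 32256
    assert write_offset(True, True) == 0
    assert write_offset(False, False) == 0
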
@@ -228,4 +248,5 @@ def download_all(response, length, dest_file, offset):

if __name__ == '__main__':
    XenAPIPlugin.dispatch({'get_vdi': get_vdi,
                           'get_kernel': get_kernel})
                           'get_kernel': get_kernel,
                           'is_vdi_pv': is_vdi_pv})