Bootable volume support
Change-Id: I2989587204766dc1dfc51979608a691578d1f660
This commit is contained in:
parent
34a82c9f35
commit
1c410cc0d9
|
@ -18,6 +18,7 @@
|
|||
import os
|
||||
import time
|
||||
|
||||
import cinderclient.exceptions as cinder_exception
|
||||
import glanceclient.exc as glance_exception
|
||||
import keystoneauth1
|
||||
from .log import LOG
|
||||
|
@ -45,6 +46,26 @@ class Compute(object):
|
|||
pass
|
||||
return None
|
||||
|
||||
def find_volumes(self, cinder_client, volume_name):
    """Return all volumes whose name contains *volume_name*.

    :param cinder_client: authenticated cinder client used to list volumes
    :param volume_name: substring matched against each volume's name
    :return: list of matching volumes (possibly empty), or None if the
             listing itself failed with a NotFound error
    """
    try:
        # Keep the try body minimal: only the API call can raise NotFound;
        # the filtering below cannot and should not be shielded by it.
        req_vols = cinder_client.volumes.list()
    except (cinder_exception.NotFound, keystoneauth1.exceptions.http.NotFound):
        return None
    # Substring match: callers pass a name prefix (e.g. the configured
    # image name) and expect every derived volume to be returned.
    return [vol for vol in req_vols if volume_name in vol.name]
|
||||
|
||||
def find_volume(self, cinder_client, volume_name):
    """Look up a single volume by exact name.

    :param cinder_client: authenticated cinder client
    :param volume_name: exact name of the volume to find
    :return: the matching volume, or None when no such volume exists
    """
    try:
        return cinder_client.volumes.find(name=volume_name)
    except (cinder_exception.NotFound, keystoneauth1.exceptions.http.NotFound):
        return None
|
||||
|
||||
def upload_image_via_url(self, glance_client, final_image_name,
|
||||
image_url, retry_count=60):
|
||||
'''
|
||||
|
@ -82,6 +103,64 @@ class Compute(object):
|
|||
"image at the specified location %s is correct.", image_url)
|
||||
return False
|
||||
return True
|
||||
def upload_image_via_url(self, glance_client, final_image_name,
                         image_url, retry_count=60):
    '''
    Directly uploads image to Nova via URL if image is not present

    :param glance_client: authenticated glance client
    :param final_image_name: name to give the created glance image
    :param image_url: location of the image; must be a file:// URL
    :param retry_count: max number of 2-second polls while waiting for
                        the image to become active
    :return: True on success, False on any failure
    '''
    file_prefix = "file://"
    if not image_url.startswith(file_prefix):
        LOG.error("File format %s is not supported. It must start with %s", image_url,
                  file_prefix)
        return False
    try:
        image_location = image_url.split(file_prefix)[1]
        # Context manager guarantees the image file is closed even when
        # the glance create/update/upload calls raise (the previous code
        # leaked the file handle on any failure).
        with open(image_location, 'rb') as f_image:
            img = glance_client.images.create(name=str(final_image_name))
            glance_client.images.update(img.id, disk_format="qcow2",
                                        container_format="bare")
            glance_client.images.upload(img.id, f_image)

        # Check for the image in glance
        retry = 0
        while img.status in ['queued', 'saving'] and retry < retry_count:
            img = self.find_image(glance_client, image_name=img.name)
            retry = retry + 1
            LOG.debug("Image not yet active, retrying %s of %s...", retry, retry_count)
            time.sleep(2)
        if img.status != 'active':
            # Routed to the generic handler below (returns False); give
            # it a meaningful cause instead of a bare Exception.
            raise Exception("Image did not become active after %d retries" % retry)
    except glance_exception.HTTPForbidden:
        LOG.error("Cannot upload image without admin access. Please make "
                  "sure the image is uploaded and is either public or owned by you.")
        return False
    except Exception:
        # catch the exception for file based errors.
        # (IOError is a subclass of Exception, so listing it separately
        # was redundant.)
        LOG.error("Failed while uploading the image. Please make sure the "
                  "image at the specified location %s is correct.", image_url)
        return False
    return True
|
||||
|
||||
def create_volume(self, cinder_client, image, vol_name, vol_size, retry_count=60):
    """Create a bootable volume from an image and wait until it is available.

    :param cinder_client: authenticated cinder client
    :param image: glance image the volume is created from (imageRef)
    :param vol_name: name to give the new volume
    :param vol_size: volume size in GB
    :param retry_count: max number of 2-second polls while the volume is
                        still creating/downloading (default 60, matching
                        the previous hard-coded value)
    :return: the volume object once its status is 'available'
    :raises Exception: if the volume fails to become available in time
    """
    LOG.info('Creating new volume: {}'.format(vol_name))
    retry = 0
    volume = cinder_client.volumes.create(name=str(vol_name), size=vol_size, imageRef=image.id)
    while volume.status in ['creating', 'downloading'] and retry < retry_count:
        # Re-fetch the volume to refresh its status.
        volume = self.find_volume(cinder_client, volume.name)
        retry = retry + 1
        LOG.debug("Volume not yet active, retrying %s of %s...", retry, retry_count)
        time.sleep(2)
    if volume.status != 'available':
        # Previously a bare "raise Exception" with no context.
        raise Exception('Volume %s failed to become available (status=%s)'
                        % (volume.name, volume.status))
    return volume
|
||||
|
||||
def delete_volumes(self, cinder_client, volumes):
    """Delete the given volume(s).

    :param cinder_client: authenticated cinder client
    :param volumes: a single volume object or a list/tuple of volume objects
    """
    # Accept a bare volume object as well as a list. The previous
    # isinstance(volumes, str) guard was ineffective: a str has no
    # .name attribute, and callers actually pass volume objects.
    if not isinstance(volumes, (list, tuple)):
        volumes = [volumes]
    for volume in volumes:
        LOG.info('Deleting volume: {}'.format(volume.name))
        cinder_client.volumes.delete(volume)
|
||||
|
||||
def delete_image(self, glance_client, img_name):
|
||||
try:
|
||||
|
@ -149,16 +228,22 @@ class Compute(object):
|
|||
# and check that it gets into the ACTIVE state
|
||||
def create_server(self, vmname, image, flavor, key_name,
|
||||
nic, sec_group, avail_zone=None, user_data=None,
|
||||
config_drive=None, files=None, retry_count=10):
|
||||
config_drive=None, files=None, retry_count=10, cinder_client=None, volume_size=None):
|
||||
|
||||
if sec_group:
|
||||
security_groups = [sec_group["name"]]
|
||||
else:
|
||||
security_groups = None
|
||||
|
||||
# Also attach the created security group for the test
|
||||
vol_map = ""
|
||||
if cinder_client:
|
||||
volume = self.create_volume(cinder_client, image, image.name + "_" + vmname + "_" + time.strftime('%d%m_%H%M%S'), volume_size)
|
||||
vol_map = [{"source_type": "volume", "boot_index": "0", "uuid": volume.id, "destination_type": "volume"}]
|
||||
image = ""
|
||||
instance = self.novaclient.servers.create(name=vmname,
|
||||
# image=image,
|
||||
image=image,
|
||||
block_device_mapping_v2=vol_map,
|
||||
flavor=flavor,
|
||||
key_name=key_name,
|
||||
nics=nic,
|
||||
|
@ -184,6 +269,7 @@ class Compute(object):
|
|||
# instance not in ACTIVE state
|
||||
LOG.error('Instance failed status=%s', instance.status)
|
||||
self.delete_server(instance)
|
||||
self.delete_volumes(volume)
|
||||
return None
|
||||
|
||||
def get_server_list(self):
|
||||
|
@ -363,7 +449,7 @@ class Compute(object):
|
|||
if len(avail_list) == 2:
|
||||
break
|
||||
LOG.info('Using hypervisors ' + ', '.join(avail_list))
|
||||
else:
|
||||
if len(avail_list) < 2:
|
||||
for host in host_list:
|
||||
# this host must be a compute node
|
||||
if host.binary != 'nova-compute' or host.state != 'up' or host.status != 'enabled':
|
||||
|
@ -374,7 +460,7 @@ class Compute(object):
|
|||
candidate = self.normalize_az_host(None, host.host)
|
||||
else:
|
||||
candidate = self.normalize_az_host(host.zone, host.host)
|
||||
if candidate:
|
||||
if candidate and candidate not in avail_list:
|
||||
avail_list.append(candidate)
|
||||
# pick first 2 matches at most
|
||||
if len(avail_list) == 2:
|
||||
|
|
|
@ -146,7 +146,9 @@ class Instance(object):
|
|||
az,
|
||||
internal_network_name,
|
||||
sec_group,
|
||||
init_file_name=None):
|
||||
init_file_name=None,
|
||||
cinder_client=None,
|
||||
volume_size=None):
|
||||
# if ssh is created it means this is a native host not a vm
|
||||
if self.ssh:
|
||||
return True
|
||||
|
@ -185,7 +187,9 @@ class Instance(object):
|
|||
user_data,
|
||||
self.config_drive,
|
||||
files,
|
||||
self.config.generic_retry_count)
|
||||
self.config.generic_retry_count,
|
||||
cinder_client,
|
||||
volume_size)
|
||||
if user_data:
|
||||
user_data.close()
|
||||
if not self.instance:
|
||||
|
|
|
@ -36,7 +36,7 @@ class PerfInstance(Instance):
|
|||
ssh_access=None, nics=None, az=None,
|
||||
management_network_name=None,
|
||||
sec_group=None,
|
||||
init_file_name=None):
|
||||
init_file_name=None, cinder_client=None, volume_size=None):
|
||||
'''Create an instance
|
||||
:return: True on success, False on error
|
||||
'''
|
||||
|
@ -44,7 +44,7 @@ class PerfInstance(Instance):
|
|||
nics, az,
|
||||
management_network_name,
|
||||
sec_group,
|
||||
init_file_name)
|
||||
init_file_name, cinder_client, volume_size)
|
||||
if not rc:
|
||||
return False
|
||||
if self.tp_tool and not self.tp_tool.install():
|
||||
|
|
24
vmtp/vmtp.py
24
vmtp/vmtp.py
|
@ -32,6 +32,7 @@ from .config import config_loads
|
|||
from . import credentials
|
||||
from .fluentd import FluentLogHandler
|
||||
from glanceclient.v2 import client as glanceclient
|
||||
from cinderclient.v2 import client as cinderclient
|
||||
from . import iperf_tool
|
||||
from keystoneclient import client as keystoneclient
|
||||
from .log import CONLOG
|
||||
|
@ -166,7 +167,9 @@ class VmtpTest(object):
|
|||
az,
|
||||
int_net['name'],
|
||||
self.sec_group,
|
||||
init_file_name=user_data_file))
|
||||
init_file_name=user_data_file,
|
||||
cinder_client=self.cinder_client,
|
||||
volume_size=self.config.volume))
|
||||
|
||||
def assert_true(self, cond):
|
||||
if not cond:
|
||||
|
@ -208,9 +211,15 @@ class VmtpTest(object):
|
|||
nova_client = novaclient.Client('2', session=sess)
|
||||
neutron = neutronclient.Client('2.0', session=sess)
|
||||
self.glance_client = glanceclient.Client('2', session=sess)
|
||||
self.cinder_client = cinderclient.Client('2', session=sess) if self.config.volume else None
|
||||
|
||||
self.comp = compute.Compute(nova_client, neutron, self.config)
|
||||
|
||||
if self.config.volume:
|
||||
volumes = self.comp.find_volumes(self.cinder_client, self.config.image_name)
|
||||
LOG.info("Removing old VMTP volumes: {}".format(volumes))
|
||||
if volumes:
|
||||
self.comp.delete_volumes(self.cinder_client, volumes)
|
||||
# Add the appropriate public key to openstack
|
||||
self.comp.init_key_pair(self.config.public_key_name, self.instance_access)
|
||||
|
||||
|
@ -295,6 +304,7 @@ class VmtpTest(object):
|
|||
# avail_list = self.comp.list_hypervisor(config.availability_zone)
|
||||
avail_list = self.comp.get_az_host_list()
|
||||
if not avail_list:
|
||||
self.teardown()
|
||||
sys.exit(5)
|
||||
|
||||
# compute the list of client vm placements to run
|
||||
|
@ -452,6 +462,11 @@ class VmtpTest(object):
|
|||
if not self.config.reuse_existing_vm and self.net:
|
||||
self.net.dispose()
|
||||
# Remove the public key
|
||||
if self.config.volume:
|
||||
volumes = self.comp.find_volumes(self.cinder_client, self.config.image_name)
|
||||
LOG.info("Removing VMTP volumes: {}".format(volumes))
|
||||
if volumes:
|
||||
self.comp.delete_volumes(self.cinder_client, volumes)
|
||||
if self.comp:
|
||||
self.comp.remove_public_key(self.config.public_key_name)
|
||||
# Finally remove the security group
|
||||
|
@ -969,6 +984,12 @@ def parse_opts_from_cli():
|
|||
help='Filename for saving VMTP logs',
|
||||
metavar='<log_file>')
|
||||
|
||||
parser.add_argument('--volume', dest='volume',
|
||||
default=None,
|
||||
action='store',
|
||||
help='create bootable volumes for instances',
|
||||
metavar='<volume>')
|
||||
|
||||
return parser.parse_known_args()[0]
|
||||
|
||||
|
||||
|
@ -1012,6 +1033,7 @@ def merge_opts_to_configs(opts):
|
|||
config.keep_first_flow_and_exit = opts.keep_first_flow_and_exit
|
||||
config.inter_node_only = opts.inter_node_only
|
||||
config.same_network_only = opts.same_network_only
|
||||
config.volume = opts.volume
|
||||
|
||||
if config.public_key_file and not os.path.isfile(config.public_key_file):
|
||||
LOG.warning('Invalid public_key_file:' + config.public_key_file)
|
||||
|
|
Loading…
Reference in New Issue