Major cleanup + fix quota update on proper tenant

Add openrc to config
Remove HTTP VM image upload and replace with file-based upload
Migrate to the new glance API to upload the VM image from a file
Remove unused class
Move tenant code from users.py into the Tenant class
Use kloud-level and user-level client handles with the proper credentials
Fix oslo log silently appending the active exception to LOG.info by clearing it

Change-Id: I0aaede9b9910fd70dac06d23577d5b205cbddd85
ahothan 2017-07-01 23:55:17 -07:00
parent 4db46e2915
commit fc6f828521
8 changed files with 197 additions and 128 deletions

View File

@@ -312,15 +312,3 @@ class NovaQuota(object):
def update_quota(self, **kwargs):
self.novaclient.quotas.update(self.tenant_id, **kwargs)
class CinderQuota(object):
def __init__(self, cinderclient, tenant_id):
self.cinderclient = cinderclient
self.tenant_id = tenant_id
def get(self):
return vars(self.cinderclient.quotas.get(self.tenant_id))
def update_quota(self, **kwargs):
self.cinderclient.quotas.update(self.tenant_id, **kwargs)
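For illustration, a minimal sketch of the raw client calls these quota wrappers perform, applied to the tenant that owns the resources (the quota values below are made up):

# Hedged sketch: apply nova and cinder quotas with admin-scoped clients.
# Quota numbers are illustrative only.
from cinderclient import client as cinderclient
from novaclient import client as novaclient

def apply_tenant_quotas(admin_session, tenant_id):
    nova = novaclient.Client('2', session=admin_session)
    cinder = cinderclient.Client('2', session=admin_session)
    # the same calls NovaQuota/CinderQuota issue via update_quota()
    nova.quotas.update(tenant_id, instances=100, cores=200, ram=204800)
    cinder.quotas.update(tenant_id, volumes=100, gigabytes=1000)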

View File

@@ -10,14 +10,26 @@
# COMMON CONFIG OPTIONS FOR BOTH SERVER AND CLIENT SIDE
# =====================================================
# The openrc file - can be overridden at the cli with the --rc argument
openrc_file:
# Name of the image to use for all test VMs (client, server and proxy)
# without the qcow2 extension
# The image name must exist in OpenStack and be built with the appropriate packages.
# The default test VM image is named "kloudbuster_v<version>" where
# <version> is the KloudBuster test VM image version (e.g. "kloudbuster_v3")
# <version> is the KloudBuster test VM image version (e.g. "kloudbuster_v6")
# Leave empty to use the default test VM image (recommended).
# If non-empty, quote the name if it contains spaces (e.g. 'my image')
image_name:
# KloudBuster can automatically upload a VM image if the image named by
# image_name is missing; to enable this, specify a file location where
# the image can be retrieved
#
# To upload the image from a file, download it to a preferred location
# and use: file://<full path of the image with qcow2 extension>
vm_image_file:
# Keystone admin role name (default should work in most deployments)
keystone_admin_role: "admin"
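As a sketch of how a file:// value for vm_image_file can be validated and turned into a local path (the example path is hypothetical), mirroring the upload code later in this commit:

import os

def resolve_image_file(vm_image_file):
    # Return the local path from a file:// URL, or None if unusable.
    prefix = 'file://'
    if not vm_image_file or not vm_image_file.startswith(prefix):
        return None
    path = vm_image_file[len(prefix):]
    return path if os.path.isfile(path) else None

# e.g. resolve_image_file('file:///tmp/kloudbuster_v6.qcow2')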

View File

@@ -49,6 +49,28 @@ class Credentials(object):
auth = v2.Password(**dct)
return session.Session(auth=auth, verify=self.rc_cacert)
def get_user_session(self, username, password, tenant_name):
dct = {
'username': username,
'password': password,
'auth_url': self.rc_auth_url
}
auth = None
if self.rc_identity_api_version == 3:
dct.update({
'project_name': tenant_name,
'project_domain_name': self.rc_project_domain_name,
'user_domain_name': self.rc_user_domain_name
})
auth = v3.Password(**dct)
else:
dct.update({
'tenant_name': tenant_name
})
auth = v2.Password(**dct)
return session.Session(auth=auth, verify=self.rc_cacert)
def __parse_openrc(self, file):
export_re = re.compile('export OS_([A-Z_]*)="?(.*)')
for line in file:
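A hedged usage sketch for the new get_user_session(): clients built from a user-scoped session act as that tenant's user rather than as admin (the cred object and all credential values below are placeholders):

from novaclient import client as novaclient

# cred: a Credentials object already parsed from an openrc file
sess = cred.get_user_session('kb_user_1', 'secret', 'kb_tenant_1')
nova = novaclient.Client('2', endpoint_type='publicURL', session=sess)
nova.servers.list()  # runs with the user's own project scope, not admin's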

View File

@@ -157,8 +157,11 @@ class KBConfig(object):
def init_with_cli(self):
self.storage_mode = CONF.storage
self.multicast_mode = CONF.multicast
self.get_credentials()
self.get_configs()
# check if an openrc file was specified in the config file
if not CONF.tested_rc and self.config_scale['openrc_file']:
CONF.tested_rc = self.config_scale['openrc_file']
self.get_credentials()
self.get_topo_cfg()
self.get_tenants_list()
self.update_configs()

View File

@@ -19,6 +19,7 @@ from distutils.version import LooseVersion
import json
import log as logging
import redis
import sys
import threading
import time
@@ -83,6 +84,9 @@ class KBRunner(object):
self.redis_obj.get("test")
success = True
except (redis.exceptions.ConnectionError):
# clear the active exception to avoid its summary being
# appended to the LOG.info message by oslo log
sys.exc_clear()
LOG.info("Connecting to redis server... Retry #%d/%d", retry, retry_count)
time.sleep(self.config.polling_interval)
continue
@@ -133,6 +137,8 @@
msg = self.message_queue.popleft()
except IndexError:
# No new message; commands are still executing
# clear the active exception to prevent LOG pollution
sys.exc_clear()
break
payload = eval(msg['data'])
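For context, a minimal Python 2 sketch of the sys.exc_clear() pattern used above (the stdlib logger stands in for oslo log, and redis_obj is a placeholder): clearing the active exception keeps the log formatter from appending an exception summary to an otherwise plain informational message.

import logging
import sys

LOG = logging.getLogger(__name__)

def poll_redis_once(redis_obj):
    try:
        redis_obj.get("test")
        return True
    except Exception:
        sys.exc_clear()  # Python 2 only; removed in Python 3
        LOG.info("Connecting to redis server... will retry")
        return False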

View File

@@ -25,7 +25,8 @@ import webbrowser
import base_compute
import base_network
import glanceclient.exc as glance_exception
from cinderclient import client as cinderclient
from glanceclient import exc as glance_exception
from glanceclient.v2 import client as glanceclient
from kb_config import KBConfig
from kb_res_logger import KBResLogger
@@ -36,9 +37,11 @@ from kb_runner_storage import KBRunner_Storage
from kb_scheduler import KBScheduler
import kb_vm_agent
import keystoneauth1
from keystoneclient.v2_0 import client as keystoneclient
import log as logging
from neutronclient.neutron import client as neutronclient
from novaclient import client as novaclient
from oslo_config import cfg
import pbr.version
@@ -64,6 +67,7 @@ class Kloud(object):
self.reusing_tenants = reusing_tenants
self.storage_mode = storage_mode
self.multicast_mode = multicast_mode
self.credentials = cred
self.osclient_session = cred.get_session()
self.flavor_to_use = None
self.vm_up_count = 0
@@ -78,7 +82,15 @@
self.placement_az = scale_cfg['availability_zone'] \
if scale_cfg['availability_zone'] else None
self.exc_info = None
# these client handles use the KloudBuster credentials (usually admin)
# for tenant creation, tenant nova+cinder quota allocation, and the like
self.keystone = keystoneclient.Client(session=self.osclient_session)
self.neutron_client = neutronclient.Client('2.0', endpoint_type='publicURL',
session=self.osclient_session)
self.nova_client = novaclient.Client('2', endpoint_type='publicURL',
session=self.osclient_session)
self.cinder_client = cinderclient.Client('2', endpoint_type='publicURL',
session=self.osclient_session)
LOG.info("Creating kloud: " + self.prefix)
if self.placement_az:
LOG.info('%s Availability Zone: %s' % (self.name, self.placement_az))
@@ -306,46 +318,70 @@ class KloudBuster(object):
def check_and_upload_images(self, retry_count=150):
retry = 0
creds_list = [self.server_cred.get_session(),
self.client_cred.get_session()]
creds_dict = dict(zip(['Server kloud', 'Client kloud'], creds_list))
img_name_dict = dict(zip(['Server kloud', 'Client kloud'],
[self.server_cfg.image_name, self.client_cfg.image_name]))
for kloud, sess in creds_dict.items():
image_location = None
image_name = self.client_cfg.image_name
image_url = self.client_cfg.vm_image_file
kloud_name_list = ['Server kloud', 'Client kloud']
session_list = [self.server_cred.get_session(),
self.client_cred.get_session()]
for kloud, sess in zip(kloud_name_list, session_list):
glance_client = glanceclient.Client('2', session=sess)
try:
# Search for the image
img = glance_client.images.list(filters={'name': img_name_dict[kloud]}).next()
img = glance_client.images.list(filters={'name': image_name}).next()
continue
except StopIteration:
pass
sys.exc_clear()
# Trying to upload images
kb_image_name = kb_vm_agent.get_image_name() + '.qcow2'
image_url = 'http://storage.apps.openstack.org/images/%s' % kb_image_name
LOG.info("Image is not found in %s, uploading from OpenStack App Store..." % kloud)
LOG.info("KloudBuster VM Image is not found in %s, trying to upload it..." % kloud)
if not image_location:
if not image_url:
LOG.error('Configuration file is missing a VM image URL (vm_image_file)')
return False
file_prefix = 'file://'
if not image_url.startswith(file_prefix):
LOG.error('vm_image_file (%s) must start with "%s", aborting' %
(image_url, file_prefix))
return False
image_location = image_url.split(file_prefix)[1]
retry = 0
try:
img = glance_client.images.create(name=img_name_dict[kloud],
disk_format="qcow2",
container_format="bare",
is_public=True,
copy_from=image_url)
LOG.info("Uploading VM Image from %s..." % image_location)
with open(image_location) as f_image:
img = glance_client.images.create(name=image_name,
disk_format="qcow2",
container_format="bare",
visibility="public")
glance_client.images.upload(img.id, image_data=f_image)
# Check for the image in glance
while img.status in ['queued', 'saving'] and retry < retry_count:
img = glance_client.images.find(name=img.name)
retry = retry + 1
img = glance_client.images.get(img.id)
retry += 1
LOG.debug("Image not yet active, retrying %s of %s...", retry, retry_count)
time.sleep(2)
if img.status != 'active':
raise Exception
LOG.error("Image uploaded but too long to get to active state")
raise Exception("Image update active state timeout")
except glance_exception.HTTPForbidden:
LOG.error("Cannot upload image without admin access. Please make sure the "
"image is uploaded and is either public or owned by you.")
LOG.error("Cannot upload image without admin access. Please make "
"sure the image is uploaded and is either public or owned by you.")
return False
except IOError as exc:
# catch file-based I/O errors
LOG.error("Failed while uploading the image. Please make sure the "
"image at the specified location %s is correct: %s",
image_url, str(exc))
return False
except keystoneauth1.exceptions.http.NotFound as exc:
LOG.error("Authentication error while uploading the image: " + str(exc))
return False
except Exception as exc:
LOG.error("Failed while uploading the image, please make sure the cloud "
"under test has the access to URL: %s." % image_url)
LOG.error(traceback.format_exc())
LOG.error("Failed while uploading the image: %s", str(exc))
return False
return True
return True
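The change above replaces the glance v1-style copy_from HTTP import with the v2 two-step flow: create the image record, then upload the bits. A standalone hedged sketch, with a hypothetical image path; note the binary open mode:

import time
from glanceclient.v2 import client as glanceclient

def upload_qcow2(session, name, path, retries=150):
    glance = glanceclient.Client('2', session=session)
    img = glance.images.create(name=name, disk_format='qcow2',
                               container_format='bare', visibility='public')
    with open(path, 'rb') as f_image:
        glance.images.upload(img.id, image_data=f_image)
    for _ in range(retries):  # poll until glance marks the image active
        if glance.images.get(img.id).status == 'active':
            return img.id
        time.sleep(2)
    raise Exception('image %s did not become active in time' % name)

# e.g. upload_qcow2(sess, 'kloudbuster_v6', '/tmp/kloudbuster_v6.qcow2')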
def print_provision_info(self):
@@ -611,7 +647,8 @@ class KloudBuster(object):
cleanup_flag = False
try:
cleanup_flag = self.testing_kloud.delete_resources()
if self.testing_kloud:
cleanup_flag = self.testing_kloud.delete_resources()
except Exception:
traceback.print_exc()
if not cleanup_flag:

View File

@@ -12,12 +12,23 @@
# License for the specific language governing permissions and limitations
# under the License.
import base_compute
import base_network
import base_storage
from keystoneclient import exceptions as keystone_exception
import log as logging
import sys
import users
LOG = logging.getLogger(__name__)
class KBFlavorCheckException(Exception):
pass
class KBQuotaCheckException(Exception):
pass
class Tenant(object):
"""
Holds the tenant resources
@@ -61,7 +72,7 @@ class Tenant(object):
LOG.info("Creating tenant: " + self.tenant_name)
tenant_object = \
self.tenant_api.create(self.tenant_name,
domain="default",
# domain="default",
description="KloudBuster tenant",
enabled=True)
return tenant_object
@@ -76,11 +87,76 @@ class Tenant(object):
# Should never come here
raise Exception()
def update_quota(self):
nova_quota = base_compute.NovaQuota(self.kloud.nova_client, self.tenant_id)
nova_quota.update_quota(**self.tenant_quota['nova'])
if self.kloud.storage_mode:
cinder_quota = base_storage.CinderQuota(self.kloud.cinder_client, self.tenant_id)
cinder_quota.update_quota(**self.tenant_quota['cinder'])
neutron_quota = base_network.NeutronQuota(self.kloud.neutron_client, self.tenant_id)
neutron_quota.update_quota(self.tenant_quota['neutron'])
def check_quota(self):
# Flavor check
flavor_manager = base_compute.Flavor(self.kloud.nova_client)
find_flag = False
fcand = {'vcpus': sys.maxint, 'ram': sys.maxint, 'disk': sys.maxint}
for flavor in flavor_manager.list():
flavor = vars(flavor)
if flavor['vcpus'] < 1 or flavor['ram'] < 1024 or flavor['disk'] < 10:
continue
if flavor['vcpus'] < fcand['vcpus']:
fcand = flavor
if flavor['vcpus'] == fcand['vcpus'] and flavor['ram'] < fcand['ram']:
fcand = flavor
if flavor['vcpus'] == fcand['vcpus'] and flavor['ram'] == fcand['ram'] and\
flavor['disk'] < fcand['disk']:
fcand = flavor
find_flag = True
if find_flag:
LOG.info('Automatically selecting flavor %s to instantiate VMs.' % fcand['name'])
self.kloud.flavor_to_use = fcand['name']
else:
LOG.error('Cannot find a flavor which meets the minimum '
'requirements to instantiate VMs.')
raise KBFlavorCheckException()
# Nova/Cinder/Neutron quota check
tenant_id = self.tenant_id
meet_quota = True
for quota_type in ['nova', 'cinder', 'neutron']:
if quota_type == 'nova':
quota_manager = base_compute.NovaQuota(self.kloud.nova_client, tenant_id)
elif quota_type == 'cinder':
quota_manager = base_storage.CinderQuota(self.kloud.cinder_client, tenant_id)
else:
quota_manager = base_network.NeutronQuota(self.kloud.neutron_client, tenant_id)
meet_quota = True
quota = quota_manager.get()
for key, value in self.tenant_quota[quota_type].iteritems():
if quota[key] < value:
meet_quota = False
break
if not meet_quota:
LOG.error('%s quota is too small. Minimum requirement: %s.' %
(quota_type, self.tenant_quota[quota_type]))
raise KBQuotaCheckException()
def create_resources(self):
"""
Creates all the entities associated with the tenant
and offloads per-user tasks to the User class
"""
if self.kloud.reusing_tenants:
self.check_quota()
else:
self.update_quota()
if self.reusing_users:
user_name = self.reusing_users['username']
password = self.reusing_users['password']
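The flavor check above is equivalent to picking, among flavors with at least 1 vCPU, 1024 MB RAM and 10 GB disk, the smallest by (vcpus, ram, disk); a self-contained sketch of the same heuristic, with made-up sample flavors:

def pick_flavor(flavors):
    # smallest eligible flavor, ordered by vcpus, then ram, then disk
    eligible = [f for f in flavors
                if f['vcpus'] >= 1 and f['ram'] >= 1024 and f['disk'] >= 10]
    return min(eligible, key=lambda f: (f['vcpus'], f['ram'], f['disk'])) \
        if eligible else None

# made-up sample data; 'small' is selected, 'tiny' is filtered out
print(pick_flavor([
    {'name': 'tiny', 'vcpus': 1, 'ram': 512, 'disk': 1},
    {'name': 'small', 'vcpus': 1, 'ram': 2048, 'disk': 20},
    {'name': 'medium', 'vcpus': 2, 'ram': 4096, 'disk': 40},
]))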

View File

@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import sys
import base_compute
import base_network
from cinderclient import client as cinderclient
@@ -24,12 +22,6 @@ from novaclient import client as novaclient
LOG = logging.getLogger(__name__)
class KBFlavorCheckException(Exception):
pass
class KBQuotaCheckException(Exception):
pass
class User(object):
"""
User class that stores router list
@@ -49,10 +41,17 @@ class User(object):
self.tenant = tenant
self.res_logger = tenant.res_logger
self.router_list = []
# Store the nova, neutron and cinder client
self.nova_client = None
self.neutron_client = None
self.cinder_client = None
# nova, neutron and cinder clients for this user
session = self.tenant.kloud.credentials.get_user_session(user_name, password,
tenant.tenant_name)
# Create nova/neutron/cinder clients to be used for all operations
self.neutron_client = neutronclient.Client('2.0', endpoint_type='publicURL',
session=session)
self.nova_client = novaclient.Client('2', endpoint_type='publicURL',
http_log_debug=True, session=session)
self.cinder_client = cinderclient.Client('2', endpoint_type='publicURL',
session=session)
# Each user is associated to 1 key pair at most
self.key_pair = None
self.key_name = None
@@ -127,86 +126,12 @@ class User(object):
return flag
def update_tenant_quota(self, tenant_quota):
nova_quota = base_compute.NovaQuota(self.nova_client, self.tenant.tenant_id)
nova_quota.update_quota(**tenant_quota['nova'])
if self.tenant.kloud.storage_mode:
cinder_quota = base_compute.CinderQuota(self.cinder_client, self.tenant.tenant_id)
cinder_quota.update_quota(**tenant_quota['cinder'])
neutron_quota = base_network.NeutronQuota(self.neutron_client, self.tenant.tenant_id)
neutron_quota.update_quota(tenant_quota['neutron'])
def check_resources_quota(self):
# Flavor check
flavor_manager = base_compute.Flavor(self.nova_client)
find_flag = False
fcand = {'vcpus': sys.maxint, 'ram': sys.maxint, 'disk': sys.maxint}
for flavor in flavor_manager.list():
flavor = vars(flavor)
if flavor['vcpus'] < 1 or flavor['ram'] < 1024 or flavor['disk'] < 10:
continue
if flavor['vcpus'] < fcand['vcpus']:
fcand = flavor
if flavor['vcpus'] == fcand['vcpus'] and flavor['ram'] < fcand['ram']:
fcand = flavor
if flavor['vcpus'] == fcand['vcpus'] and flavor['ram'] == fcand['ram'] and\
flavor['disk'] < fcand['disk']:
fcand = flavor
find_flag = True
if find_flag:
LOG.info('Automatically selects flavor %s to instantiate VMs.' % fcand['name'])
self.tenant.kloud.flavor_to_use = fcand['name']
else:
LOG.error('Cannot find a flavor which meets the minimum '
'requirements to instantiate VMs.')
raise KBFlavorCheckException()
# Nova/Cinder/Neutron quota check
tenant_id = self.tenant.tenant_id
meet_quota = True
for quota_type in ['nova', 'cinder', 'neutron']:
if quota_type == 'nova':
quota_manager = base_compute.NovaQuota(self.nova_client, tenant_id)
elif quota_type == 'cinder':
quota_manager = base_compute.CinderQuota(self.cinder_client, tenant_id)
else:
quota_manager = base_network.NeutronQuota(self.neutron_client, tenant_id)
meet_quota = True
quota = quota_manager.get()
for key, value in self.tenant.tenant_quota[quota_type].iteritems():
if quota[key] < value:
meet_quota = False
break
if not meet_quota:
LOG.error('%s quota is too small. Minimum requirement: %s.' %
(quota_type, self.tenant.tenant_quota[quota_type]))
raise KBQuotaCheckException()
def create_resources(self):
"""
Creates all the User elements associated with a User
1. Creates the routers
2. Creates the neutron and nova client objects
"""
session = self.tenant.kloud.osclient_session
# Create nova/neutron/cinder clients to be used for all operations
self.neutron_client = neutronclient.Client('2.0', endpoint_type='publicURL',
session=session)
self.nova_client = novaclient.Client('2', endpoint_type='publicURL',
http_log_debug=True, session=session)
self.cinder_client = cinderclient.Client('2', endpoint_type='publicURL',
session=session)
if self.tenant.kloud.reusing_tenants:
self.check_resources_quota()
else:
self.update_tenant_quota(self.tenant.tenant_quota)
config_scale = self.tenant.kloud.scale_cfg