commit c1d7e8119f
parent 35e5a1b5b9

Sync charm-helpers.
@@ -79,9 +79,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         services.append(this_service)
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                       'ceph-osd', 'ceph-radosgw']
-        # Openstack subordinate charms do not expose an origin option as that
-        # is controlled by the principle
-        ignore = ['neutron-openvswitch']
+        # Most OpenStack subordinate charms do not expose an origin option
+        # as that is controlled by the principle.
+        ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']

         if self.openstack:
             for svc in services:
@@ -148,3 +148,36 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             return os_origin.split('%s-' % self.series)[1].split('/')[0]
         else:
             return releases[self.series]
+
+    def get_ceph_expected_pools(self, radosgw=False):
+        """Return a list of expected ceph pools in a ceph + cinder + glance
+        test scenario, based on OpenStack release and whether ceph radosgw
+        is flagged as present or not."""
+
+        if self._get_openstack_release() >= self.trusty_kilo:
+            # Kilo or later
+            pools = [
+                'rbd',
+                'cinder',
+                'glance'
+            ]
+        else:
+            # Juno or earlier
+            pools = [
+                'data',
+                'metadata',
+                'rbd',
+                'cinder',
+                'glance'
+            ]
+
+        if radosgw:
+            pools.extend([
+                '.rgw.root',
+                '.rgw.control',
+                '.rgw',
+                '.rgw.gc',
+                '.users.uid'
+            ])
+
+        return pools
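A minimal usage sketch for the new get_ceph_expected_pools helper, as it might appear in a charm's amulet test. The deployment subclass, the `u` utils instance and the ceph sentry attribute are illustrative assumptions, not part of this change:

    # Hypothetical test method on a class deriving from OpenStackAmuletDeployment.
    def test_ceph_pools(self):
        expected = self.get_ceph_expected_pools(radosgw=False)
        actual = u.get_ceph_pools(self.ceph0_sentry)   # e.g. {'rbd': 2, ...}
        missing = [pool for pool in expected if pool not in actual]
        if missing:
            amulet.raise_status(amulet.FAIL,
                                msg='missing ceph pools: {}'.format(missing))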
@@ -14,16 +14,20 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

+import amulet
+import json
 import logging
 import os
 import six
 import time
 import urllib

+import cinderclient.v1.client as cinder_client
 import glanceclient.v1.client as glance_client
 import heatclient.v1.client as heat_client
 import keystoneclient.v2_0 as keystone_client
 import novaclient.v1_1.client as nova_client
+import swiftclient

 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
@@ -171,6 +175,16 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.debug('Checking if tenant exists ({})...'.format(tenant))
         return tenant in [t.name for t in keystone.tenants.list()]

+    def authenticate_cinder_admin(self, keystone_sentry, username,
+                                  password, tenant):
+        """Authenticates admin user with cinder."""
+        # NOTE(beisner): cinder python client doesn't accept tokens.
+        service_ip = \
+            keystone_sentry.relation('shared-db',
+                                     'mysql:shared-db')['private-address']
+        ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
+        return cinder_client.Client(username, password, tenant, ept)
+
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant):
         """Authenticates admin user with the keystone admin endpoint."""
@@ -212,9 +226,29 @@ class OpenStackAmuletUtils(AmuletUtils):
         return nova_client.Client(username=user, api_key=password,
                                   project_id=tenant, auth_url=ep)

+    def authenticate_swift_user(self, keystone, user, password, tenant):
+        """Authenticates a regular user with swift api."""
+        self.log.debug('Authenticating swift user ({})...'.format(user))
+        ep = keystone.service_catalog.url_for(service_type='identity',
+                                              endpoint_type='publicURL')
+        return swiftclient.Connection(authurl=ep,
+                                      user=user,
+                                      key=password,
+                                      tenant_name=tenant,
+                                      auth_version='2.0')
+
     def create_cirros_image(self, glance, image_name):
-        """Download the latest cirros image and upload it to glance."""
-        self.log.debug('Creating glance image ({})...'.format(image_name))
+        """Download the latest cirros image and upload it to glance,
+        validate and return a resource pointer.
+
+        :param glance: pointer to authenticated glance connection
+        :param image_name: display name for new image
+        :returns: glance image pointer
+        """
+        self.log.debug('Creating glance cirros image '
+                       '({})...'.format(image_name))
+
+        # Download cirros image
         http_proxy = os.getenv('AMULET_HTTP_PROXY')
         self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
         if http_proxy:
@@ -223,33 +257,51 @@ class OpenStackAmuletUtils(AmuletUtils):
         else:
             opener = urllib.FancyURLopener()

-        f = opener.open("http://download.cirros-cloud.net/version/released")
+        f = opener.open('http://download.cirros-cloud.net/version/released')
         version = f.read().strip()
-        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
+        cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
         local_path = os.path.join('tests', cirros_img)

         if not os.path.exists(local_path):
-            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
+            cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
                                                   version, cirros_img)
             opener.retrieve(cirros_url, local_path)
         f.close()

+        # Create glance image
         with open(local_path) as f:
             image = glance.images.create(name=image_name, is_public=True,
                                          disk_format='qcow2',
                                          container_format='bare', data=f)
-        count = 1
-        status = image.status
-        while status != 'active' and count < 10:
-            time.sleep(3)
-            image = glance.images.get(image.id)
-            status = image.status
-            self.log.debug('image status: {}'.format(status))
-            count += 1

-        if status != 'active':
-            self.log.error('image creation timed out')
-            return None
+        # Wait for image to reach active status
+        img_id = image.id
+        ret = self.resource_reaches_status(glance.images, img_id,
+                                           expected_stat='active',
+                                           msg='Image status wait')
+        if not ret:
+            msg = 'Glance image failed to reach expected state.'
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new image
+        self.log.debug('Validating image attributes...')
+        val_img_name = glance.images.get(img_id).name
+        val_img_stat = glance.images.get(img_id).status
+        val_img_pub = glance.images.get(img_id).is_public
+        val_img_cfmt = glance.images.get(img_id).container_format
+        val_img_dfmt = glance.images.get(img_id).disk_format
+        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+                    'container fmt:{} disk fmt:{}'.format(
+                        val_img_name, val_img_pub, img_id,
+                        val_img_stat, val_img_cfmt, val_img_dfmt))
+
+        if val_img_name == image_name and val_img_stat == 'active' \
+                and val_img_pub is True and val_img_cfmt == 'bare' \
+                and val_img_dfmt == 'qcow2':
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Volume validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
         return image

@@ -260,22 +312,7 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.warn('/!\\ DEPRECATION WARNING: use '
                       'delete_resource instead of delete_image.')
         self.log.debug('Deleting glance image ({})...'.format(image))
-        num_before = len(list(glance.images.list()))
-        glance.images.delete(image)
-
-        count = 1
-        num_after = len(list(glance.images.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(glance.images.list()))
-            self.log.debug('number of images: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('image deletion timed out')
-            return False
-
-        return True
+        return self.delete_resource(glance.images, image, msg='glance image')

     def create_instance(self, nova, image_name, instance_name, flavor):
         """Create the specified instance."""
@@ -308,22 +345,8 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.warn('/!\\ DEPRECATION WARNING: use '
                       'delete_resource instead of delete_instance.')
         self.log.debug('Deleting instance ({})...'.format(instance))
-        num_before = len(list(nova.servers.list()))
-        nova.servers.delete(instance)
-
-        count = 1
-        num_after = len(list(nova.servers.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(nova.servers.list()))
-            self.log.debug('number of instances: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('instance deletion timed out')
-            return False
-
-        return True
+        return self.delete_resource(nova.servers, instance,
+                                    msg='nova instance')

     def create_or_get_keypair(self, nova, keypair_name="testkey"):
         """Create a new keypair, or return pointer if it already exists."""
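Both deprecated deletion helpers now delegate to the generic delete_resource, which polls the resource list until the item disappears or max_wait elapses. A hedged usage sketch, assuming authenticated `glance` and `nova` clients from earlier test steps:

    # Delete an image and an instance through the shared helper.
    assert u.delete_resource(glance.images, image.id, msg='glance image')
    assert u.delete_resource(nova.servers, instance.id,
                             msg='nova instance', max_wait=120)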
@@ -339,6 +362,88 @@ class OpenStackAmuletUtils(AmuletUtils):
             _keypair = nova.keypairs.create(name=keypair_name)
             return _keypair

+    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
+                             img_id=None, src_vol_id=None, snap_id=None):
+        """Create cinder volume, optionally from a glance image, OR
+        optionally as a clone of an existing volume, OR optionally
+        from a snapshot.  Wait for the new volume status to reach
+        the expected status, validate and return a resource pointer.
+
+        :param vol_name: cinder volume display name
+        :param vol_size: size in gigabytes
+        :param img_id: optional glance image id
+        :param src_vol_id: optional source volume id to clone
+        :param snap_id: optional snapshot id to use
+        :returns: cinder volume pointer
+        """
+        # Handle parameter input and avoid impossible combinations
+        if img_id and not src_vol_id and not snap_id:
+            # Create volume from image
+            self.log.debug('Creating cinder volume from glance image...')
+            bootable = 'true'
+        elif src_vol_id and not img_id and not snap_id:
+            # Clone an existing volume
+            self.log.debug('Cloning cinder volume...')
+            bootable = cinder.volumes.get(src_vol_id).bootable
+        elif snap_id and not src_vol_id and not img_id:
+            # Create volume from snapshot
+            self.log.debug('Creating cinder volume from snapshot...')
+            snap = cinder.volume_snapshots.find(id=snap_id)
+            vol_size = snap.size
+            snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
+            bootable = cinder.volumes.get(snap_vol_id).bootable
+        elif not img_id and not src_vol_id and not snap_id:
+            # Create volume
+            self.log.debug('Creating cinder volume...')
+            bootable = 'false'
+        else:
+            # Impossible combination of parameters
+            msg = ('Invalid method use - name:{} size:{} img_id:{} '
+                   'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
+                                                     img_id, src_vol_id,
+                                                     snap_id))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Create new volume
+        try:
+            vol_new = cinder.volumes.create(display_name=vol_name,
+                                            imageRef=img_id,
+                                            size=vol_size,
+                                            source_volid=src_vol_id,
+                                            snapshot_id=snap_id)
+            vol_id = vol_new.id
+        except Exception as e:
+            msg = 'Failed to create volume: {}'.format(e)
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Wait for volume to reach available status
+        ret = self.resource_reaches_status(cinder.volumes, vol_id,
+                                           expected_stat="available",
+                                           msg="Volume status wait")
+        if not ret:
+            msg = 'Cinder volume failed to reach expected state.'
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new volume
+        self.log.debug('Validating volume attributes...')
+        val_vol_name = cinder.volumes.get(vol_id).display_name
+        val_vol_boot = cinder.volumes.get(vol_id).bootable
+        val_vol_stat = cinder.volumes.get(vol_id).status
+        val_vol_size = cinder.volumes.get(vol_id).size
+        msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
+                    '{} size:{}'.format(val_vol_name, vol_id,
+                                        val_vol_stat, val_vol_boot,
+                                        val_vol_size))
+
+        if val_vol_boot == bootable and val_vol_stat == 'available' \
+                and val_vol_name == vol_name and val_vol_size == vol_size:
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Volume validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        return vol_new
+
     def delete_resource(self, resource, resource_id,
                         msg="resource", max_wait=120):
         """Delete one openstack resource, such as one instance, keypair,
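A sketch of how the new create_cinder_volume helper could be exercised; the authenticated `cinder` client and the glance image object are assumed to come from earlier test steps:

    # Blank volume, then a bootable volume built from a glance image.
    vol = u.create_cinder_volume(cinder, vol_name='demo-vol', vol_size=1)
    vol_from_img = u.create_cinder_volume(cinder, vol_name='demo-vol-img',
                                          vol_size=1, img_id=image.id)
    # Passing more than one source (img_id, src_vol_id, snap_id) triggers
    # amulet.raise_status with an 'Invalid method use' message.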
@@ -350,6 +455,8 @@ class OpenStackAmuletUtils(AmuletUtils):
         :param max_wait: maximum wait time in seconds
         :returns: True if successful, otherwise False
         """
+        self.log.debug('Deleting OpenStack resource '
+                       '{} ({})'.format(resource_id, msg))
         num_before = len(list(resource.list()))
         resource.delete(resource_id)

@@ -411,3 +518,87 @@ class OpenStackAmuletUtils(AmuletUtils):
             self.log.debug('{} never reached expected status: '
                            '{}'.format(resource_id, expected_stat))
             return False
+
+    def get_ceph_osd_id_cmd(self, index):
+        """Produce a shell command that will return a ceph-osd id."""
+        return ("`initctl list | grep 'ceph-osd ' | "
+                "awk 'NR=={} {{ print $2 }}' | "
+                "grep -o '[0-9]*'`".format(index + 1))
+
+    def get_ceph_pools(self, sentry_unit):
+        """Return a dict of ceph pools from a single ceph unit, with
+        pool name as keys, pool id as vals."""
+        pools = {}
+        cmd = 'sudo ceph osd lspools'
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
+        for pool in str(output).split(','):
+            pool_id_name = pool.split(' ')
+            if len(pool_id_name) == 2:
+                pool_id = pool_id_name[0]
+                pool_name = pool_id_name[1]
+                pools[pool_name] = int(pool_id)
+
+        self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
+                                                pools))
+        return pools
+
+    def get_ceph_df(self, sentry_unit):
+        """Return dict of ceph df json output, including ceph pool state.
+
+        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :returns: Dict of ceph df output
+        """
+        cmd = 'sudo ceph df --format=json'
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        return json.loads(output)
+
+    def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
+        """Take a sample of attributes of a ceph pool, returning ceph
+        pool name, object count and disk space used for the specified
+        pool ID number.
+
+        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :param pool_id: Ceph pool ID
+        :returns: List of pool name, object count, kb disk space used
+        """
+        df = self.get_ceph_df(sentry_unit)
+        pool_name = df['pools'][pool_id]['name']
+        obj_count = df['pools'][pool_id]['stats']['objects']
+        kb_used = df['pools'][pool_id]['stats']['kb_used']
+        self.log.debug('Ceph {} pool (ID {}): {} objects, '
+                       '{} kb used'.format(pool_name, pool_id,
+                                           obj_count, kb_used))
+        return pool_name, obj_count, kb_used
+
+    def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
+        """Validate ceph pool samples taken over time, such as pool
+        object counts or pool kb used, before adding, after adding, and
+        after deleting items which affect those pool attributes. The
+        2nd element is expected to be greater than the 1st; 3rd is expected
+        to be less than the 2nd.
+
+        :param samples: List containing 3 data samples
+        :param sample_type: String for logging and usage context
+        :returns: None if successful, Failure message otherwise
+        """
+        original, created, deleted = range(3)
+        if samples[created] <= samples[original] or \
+                samples[deleted] >= samples[created]:
+            return ('Ceph {} samples ({}) '
+                    'unexpected.'.format(sample_type, samples))
+        else:
+            self.log.debug('Ceph {} samples (OK): '
+                           '{}'.format(sample_type, samples))
+            return None
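The pool-sampling helpers are intended to be called three times around a create/delete cycle and then validated together. A sketch under the assumption that `sentry` is a ceph unit sentry, `cinder` an authenticated client, and pool id 3 the cinder pool:

    pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry, pool_id=3)
    samples = [obj_count]                                    # before
    vol = u.create_cinder_volume(cinder)
    samples.append(u.get_ceph_pool_sample(sentry, 3)[1])     # after create
    u.delete_resource(cinder.volumes, vol.id, msg='cinder volume')
    samples.append(u.get_ceph_pool_sample(sentry, 3)[1])     # after delete
    ret = u.validate_ceph_pool_samples(samples, 'cinder pool object count')
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)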
@@ -122,21 +122,24 @@ def config_flags_parser(config_flags):
        of specifying multiple key value pairs within the same string. For
        example, a string in the format of 'key1=value1, key2=value2' will
        return a dict of:
-           {'key1': 'value1',
-            'key2': 'value2'}.
+
+               {'key1': 'value1',
+                'key2': 'value2'}.
+
        2. A string in the above format, but supporting a comma-delimited list
        of values for the same key. For example, a string in the format of
        'key1=value1, key2=value3,value4,value5' will return a dict of:
-           {'key1', 'value1',
-            'key2', 'value2,value3,value4'}
+
+               {'key1', 'value1',
+                'key2', 'value2,value3,value4'}
+
        3. A string containing a colon character (:) prior to an equal
        character (=) will be treated as yaml and parsed as such. This can be
        used to specify more complex key value pairs. For example,
        a string in the format of 'key1: subkey1=value1, subkey2=value2' will
        return a dict of:
-           {'key1', 'subkey1=value1, subkey2=value2'}
+
+               {'key1', 'subkey1=value1, subkey2=value2'}
+
        The provided config_flags string may be a list of comma-separated values
        which themselves may be comma-separated list of values.
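Illustrative inputs and outputs for config_flags_parser, restating the documented cases (the results shown are as described by the docstring):

    from charmhelpers.contrib.openstack.context import config_flags_parser

    config_flags_parser('key1=value1, key2=value2')
    # -> {'key1': 'value1', 'key2': 'value2'}
    config_flags_parser('key1=value1, key2=value3,value4,value5')
    # -> {'key1': 'value1', 'key2': 'value3,value4,value5'}
    config_flags_parser('key1: subkey1=value1, subkey2=value2')
    # -> {'key1': 'subkey1=value1, subkey2=value2'}  (colon before '=' is parsed as yaml)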
@@ -891,8 +894,6 @@ class NeutronContext(OSContextGenerator):
         return ctxt

     def __call__(self):
-        self._ensure_packages()
-
         if self.network_manager not in ['quantum', 'neutron']:
             return {}

@@ -5,11 +5,11 @@
 ###############################################################################
 [global]
 {% if auth -%}
 auth_supported = {{ auth }}
 keyring = /etc/ceph/$cluster.$name.keyring
 mon host = {{ mon_hosts }}
 {% endif -%}
 log to syslog = {{ use_syslog }}
 err to syslog = {{ use_syslog }}
 clog to syslog = {{ use_syslog }}

@@ -522,6 +522,7 @@ def git_clone_and_install(projects_yaml, core_project, depth=1):
     Clone/install all specified OpenStack repositories.

     The expected format of projects_yaml is:
+
         repositories:
           - {name: keystone,
             repository: 'git://git.openstack.org/openstack/keystone.git',
@@ -529,11 +530,13 @@ def git_clone_and_install(projects_yaml, core_project, depth=1):
           - {name: requirements,
             repository: 'git://git.openstack.org/openstack/requirements.git',
             branch: 'stable/icehouse'}
+
         directory: /mnt/openstack-git
         http_proxy: squid-proxy-url
         https_proxy: squid-proxy-url
+
     The directory, http_proxy, and https_proxy keys are optional.

     """
     global requirements_dir
     parent_dir = '/mnt/openstack-git'
@@ -555,10 +558,11 @@ def git_clone_and_install(projects_yaml, core_project, depth=1):

     pip_create_virtualenv(os.path.join(parent_dir, 'venv'))

-    # Upgrade setuptools from default virtualenv version. The default version
-    # in trusty breaks update.py in global requirements master branch.
-    pip_install('setuptools', upgrade=True, proxy=http_proxy,
-                venv=os.path.join(parent_dir, 'venv'))
+    # Upgrade setuptools and pip from default virtualenv versions. The default
+    # versions in trusty break master OpenStack branch deployments.
+    for p in ['pip', 'setuptools']:
+        pip_install(p, upgrade=True, proxy=http_proxy,
+                    venv=os.path.join(parent_dir, 'venv'))

     for p in projects['repositories']:
         repo = p['repository']
@@ -60,12 +60,12 @@ KEYRING = '/etc/ceph/ceph.client.{}.keyring'
 KEYFILE = '/etc/ceph/ceph.client.{}.key'

 CEPH_CONF = """[global]
 auth supported = {auth}
 keyring = {keyring}
 mon host = {mon_hosts}
 log to syslog = {use_syslog}
 err to syslog = {use_syslog}
 clog to syslog = {use_syslog}
 """


@@ -21,7 +21,9 @@
 # Charm Helpers Developers <juju@lists.ubuntu.com>

 from __future__ import print_function
+from distutils.version import LooseVersion
 from functools import wraps
+import glob
 import os
 import json
 import yaml
@@ -242,29 +244,7 @@ class Config(dict):
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
         if os.path.exists(self.path):
             self.load_previous()
-
-    def __getitem__(self, key):
-        """For regular dict lookups, check the current juju config first,
-        then the previous (saved) copy. This ensures that user-saved values
-        will be returned by a dict lookup.
-
-        """
-        try:
-            return dict.__getitem__(self, key)
-        except KeyError:
-            return (self._prev_dict or {})[key]
-
-    def get(self, key, default=None):
-        try:
-            return self[key]
-        except KeyError:
-            return default
-
-    def keys(self):
-        prev_keys = []
-        if self._prev_dict is not None:
-            prev_keys = self._prev_dict.keys()
-        return list(set(prev_keys + list(dict.keys(self))))
+        atexit(self._implicit_save)

     def load_previous(self, path=None):
         """Load previous copy of config from disk.
@@ -283,6 +263,9 @@ class Config(dict):
         self.path = path or self.path
         with open(self.path) as f:
             self._prev_dict = json.load(f)
+        for k, v in self._prev_dict.items():
+            if k not in self:
+                self[k] = v

     def changed(self, key):
         """Return True if the current value for this key is different from
@@ -314,13 +297,13 @@ class Config(dict):
         instance.

         """
-        if self._prev_dict:
-            for k, v in six.iteritems(self._prev_dict):
-                if k not in self:
-                    self[k] = v
         with open(self.path, 'w') as f:
             json.dump(self, f)

+    def _implicit_save(self):
+        if self.implicit_save:
+            self.save()
+

 @cached
 def config(scope=None):
@@ -587,10 +570,14 @@ class Hooks(object):
         hooks.execute(sys.argv)
     """

-    def __init__(self, config_save=True):
+    def __init__(self, config_save=None):
         super(Hooks, self).__init__()
         self._hooks = {}
-        self._config_save = config_save
+        # For unknown reasons, we allow the Hooks constructor to override
+        # config().implicit_save.
+        if config_save is not None:
+            config().implicit_save = config_save

     def register(self, name, function):
         """Register a hook"""
@@ -598,13 +585,16 @@ class Hooks(object):

     def execute(self, args):
         """Execute a registered hook based on args[0]"""
+        _run_atstart()
         hook_name = os.path.basename(args[0])
         if hook_name in self._hooks:
-            self._hooks[hook_name]()
-            if self._config_save:
-                cfg = config()
-                if cfg.implicit_save:
-                    cfg.save()
+            try:
+                self._hooks[hook_name]()
+            except SystemExit as x:
+                if x.code is None or x.code == 0:
+                    _run_atexit()
+                raise
+            _run_atexit()
         else:
             raise UnregisteredHookError(hook_name)

@@ -732,13 +722,80 @@ def leader_get(attribute=None):
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def leader_set(settings=None, **kwargs):
     """Juju leader set value(s)"""
-    log("Juju leader-set '%s'" % (settings), level=DEBUG)
+    # Don't log secrets.
+    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
     cmd = ['leader-set']
     settings = settings or {}
     settings.update(kwargs)
-    for k, v in settings.iteritems():
+    for k, v in settings.items():
         if v is None:
             cmd.append('{}='.format(k))
         else:
             cmd.append('{}={}'.format(k, v))
     subprocess.check_call(cmd)


+@cached
+def juju_version():
+    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
+    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+    return subprocess.check_output([jujud, 'version'],
+                                   universal_newlines=True).strip()
+
+
+@cached
+def has_juju_version(minimum_version):
+    """Return True if the Juju version is at least the provided version"""
+    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+    '''Schedule a callback to run before the main hook.
+
+    Callbacks are run in the order they were added.
+
+    This is useful for modules and classes to perform initialization
+    and inject behavior. In particular:
+
+        - Run common code before all of your hooks, such as logging
+          the hook name or interesting relation data.
+        - Defer object or module initialization that requires a hook
+          context until we know there actually is a hook context,
+          making testing easier.
+        - Rather than requiring charm authors to include boilerplate to
+          invoke your helper's behavior, have it run automatically if
+          your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework as been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
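A sketch of how a charm might use the new atstart/atexit scheduling; the hook name and log messages are illustrative, and the implicit config save now happens through the atexit callback that Config registers on itself:

    import sys
    from charmhelpers.core import hookenv

    # Runs before the dispatched hook body.
    hookenv.atstart(hookenv.log, 'common setup for every hook')
    # Runs only after the hook body completes successfully.
    hookenv.atexit(hookenv.log, 'hook finished cleanly')

    hooks = hookenv.Hooks()

    @hooks.hook('config-changed')
    def config_changed():
        pass

    if __name__ == '__main__':
        hooks.execute(sys.argv)   # atstart callbacks, hook body, then atexit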
@@ -63,6 +63,36 @@ def service_reload(service_name, restart_on_failure=False):
     return service_result


+def service_pause(service_name, init_dir=None):
+    """Pause a system service.
+
+    Stop it, and prevent it from starting again at boot."""
+    if init_dir is None:
+        init_dir = "/etc/init"
+    stopped = service_stop(service_name)
+    # XXX: Support systemd too
+    override_path = os.path.join(
+        init_dir, '{}.conf.override'.format(service_name))
+    with open(override_path, 'w') as fh:
+        fh.write("manual\n")
+    return stopped
+
+
+def service_resume(service_name, init_dir=None):
+    """Resume a system service.
+
+    Reenable starting again at boot. Start the service"""
+    # XXX: Support systemd too
+    if init_dir is None:
+        init_dir = "/etc/init"
+    override_path = os.path.join(
+        init_dir, '{}.conf.override'.format(service_name))
+    if os.path.exists(override_path):
+        os.unlink(override_path)
+    started = service_start(service_name)
+    return started
+
+
 def service(action, service_name):
     """Control a system service"""
     cmd = ['service', service_name, action]
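A usage sketch for the new upstart-based pause/resume helpers (the service name is illustrative):

    from charmhelpers.core.host import service_pause, service_resume

    # Stop nova-compute and write /etc/init/nova-compute.conf.override so it
    # stays down across reboots; later remove the override and start it again.
    service_pause('nova-compute')
    # ... maintenance work ...
    service_resume('nova-compute')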
@@ -140,11 +170,7 @@ def add_group(group_name, system_group=False):

 def add_user_to_group(username, group):
     """Add a user to a group"""
-    cmd = [
-        'gpasswd', '-a',
-        username,
-        group
-    ]
+    cmd = ['gpasswd', '-a', username, group]
     log("Adding user {} to group {}".format(username, group))
     subprocess.check_call(cmd)

@@ -128,15 +128,18 @@ class ServiceManager(object):
         """
         Handle the current hook by doing The Right Thing with the registered services.
         """
-        hook_name = hookenv.hook_name()
-        if hook_name == 'stop':
-            self.stop_services()
-        else:
-            self.reconfigure_services()
-            self.provide_data()
-        cfg = hookenv.config()
-        if cfg.implicit_save:
-            cfg.save()
+        hookenv._run_atstart()
+        try:
+            hook_name = hookenv.hook_name()
+            if hook_name == 'stop':
+                self.stop_services()
+            else:
+                self.reconfigure_services()
+                self.provide_data()
+        except SystemExit as x:
+            if x.code is None or x.code == 0:
+                hookenv._run_atexit()
+        hookenv._run_atexit()

     def provide_data(self):
         """
@@ -239,12 +239,12 @@ class TemplateCallback(ManagerCallback):
         action.

         :param str source: The template source file, relative to
             `$CHARM_DIR/templates`

         :param str target: The target to write the rendered template to
         :param str owner: The owner of the rendered file
         :param str group: The group of the rendered file
         :param int perms: The permissions of the rendered file

         """
     def __init__(self, source, target,
                  owner='root', group='root', perms=0o444):
@@ -215,9 +215,9 @@ def apt_purge(packages, fatal=False):
     _run_apt_command(cmd, fatal)


-def apt_hold(packages, fatal=False):
-    """Hold one or more packages"""
-    cmd = ['apt-mark', 'hold']
+def apt_mark(packages, mark, fatal=False):
+    """Flag one or more packages using apt-mark"""
+    cmd = ['apt-mark', mark]
     if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
@@ -225,9 +225,17 @@
     log("Holding {}".format(packages))

     if fatal:
-        subprocess.check_call(cmd)
+        subprocess.check_call(cmd, universal_newlines=True)
     else:
-        subprocess.call(cmd)
+        subprocess.call(cmd, universal_newlines=True)
+
+
+def apt_hold(packages, fatal=False):
+    return apt_mark(packages, 'hold', fatal=fatal)
+
+
+def apt_unhold(packages, fatal=False):
+    return apt_mark(packages, 'unhold', fatal=fatal)


 def add_source(source, key=None):
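apt_hold and apt_unhold are now thin wrappers over the generic apt_mark; a brief sketch of the resulting charm-side usage:

    from charmhelpers.fetch import apt_hold, apt_unhold, apt_mark

    apt_hold('ceph')                      # same as apt_mark('ceph', 'hold')
    apt_unhold(['ceph', 'ceph-common'])   # lists of packages are accepted too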
@@ -370,8 +378,9 @@ def install_remote(source, *args, **kwargs):
     for handler in handlers:
         try:
             installed_to = handler.install(source, *args, **kwargs)
-        except UnhandledSource:
-            pass
+        except UnhandledSource as e:
+            log('Install source attempt unsuccessful: {}'.format(e),
+                level='WARNING')
     if not installed_to:
         raise UnhandledSource("No handler found for source {}".format(source))
     return installed_to
@@ -77,6 +77,8 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
     def can_handle(self, source):
         url_parts = self.parse_url(source)
         if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
+            # XXX: Why is this returning a boolean and a string? It's
+            # doomed to fail since "bool(can_handle('foo://'))" will be True.
             return "Wrong source type"
         if get_archive_handler(self.base_url(source)):
             return True
@@ -155,7 +157,11 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         else:
             algorithms = hashlib.algorithms_available
             if key in algorithms:
-                check_hash(dld_file, value, key)
+                if len(value) != 1:
+                    raise TypeError(
+                        "Expected 1 hash value, not %d" % len(value))
+                expected = value[0]
+                check_hash(dld_file, expected, key)
         if checksum:
             check_hash(dld_file, checksum, hash_type)
         return extract(dld_file, dest)
@@ -67,7 +67,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
         try:
             self.clone(source, dest_dir, branch, depth)
         except GitCommandError as e:
-            raise UnhandledSource(e.message)
+            raise UnhandledSource(e)
         except OSError as e:
             raise UnhandledSource(e.strerror)
         return dest_dir
@@ -14,6 +14,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

+import amulet
 import ConfigParser
 import distro_info
 import io
@@ -173,6 +174,11 @@ class AmuletUtils(object):

         Verify that the specified section of the config file contains
         the expected option key:value pairs.
+
+        Compare expected dictionary data vs actual dictionary data.
+        The values in the 'expected' dictionary can be strings, bools, ints,
+        longs, or can be a function that evaluates a variable and returns a
+        bool.
         """
         self.log.debug('Validating config file data ({} in {} on {})'
                        '...'.format(section, config_file,
@@ -185,9 +191,20 @@ class AmuletUtils(object):
         for k in expected.keys():
             if not config.has_option(section, k):
                 return "section [{}] is missing option {}".format(section, k)
-            if config.get(section, k) != expected[k]:
+
+            actual = config.get(section, k)
+            v = expected[k]
+            if (isinstance(v, six.string_types) or
+                    isinstance(v, bool) or
+                    isinstance(v, six.integer_types)):
+                # handle explicit values
+                if actual != v:
+                    return "section [{}] {}:{} != expected {}:{}".format(
+                        section, k, actual, k, expected[k])
+            # handle function pointers, such as not_null or valid_ip
+            elif not v(actual):
                 return "section [{}] {}:{} != expected {}:{}".format(
-                    section, k, config.get(section, k), k, expected[k])
+                    section, k, actual, k, expected[k])
         return None

     def _validate_dict_data(self, expected, actual):
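With this change validate_config_data accepts callables as expected values. A sketch of a test assertion mixing literal and functional checks; the sentry and file names are illustrative:

    expected = {
        'verbose': 'False',
        'rabbit_host': u.not_null,                  # any non-empty value passes
        'workers': lambda actual: int(actual) > 0,  # custom validator
    }
    ret = u.validate_config_data(keystone_sentry,
                                 '/etc/keystone/keystone.conf',
                                 'DEFAULT', expected)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)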
@@ -195,7 +212,7 @@ class AmuletUtils(object):

         Compare expected dictionary data vs actual dictionary data.
         The values in the 'expected' dictionary can be strings, bools, ints,
-        longs, or can be a function that evaluate a variable and returns a
+        longs, or can be a function that evaluates a variable and returns a
         bool.
         """
         self.log.debug('actual: {}'.format(repr(actual)))
@@ -206,8 +223,10 @@ class AmuletUtils(object):
             if (isinstance(v, six.string_types) or
                     isinstance(v, bool) or
                     isinstance(v, six.integer_types)):
+                # handle explicit values
                 if v != actual[k]:
                     return "{}:{}".format(k, actual[k])
+            # handle function pointers, such as not_null or valid_ip
             elif not v(actual[k]):
                 return "{}:{}".format(k, actual[k])
             else:
@@ -406,3 +425,109 @@ class AmuletUtils(object):
         """Convert a relative file path to a file URL."""
         _abs_path = os.path.abspath(file_rel_path)
         return urlparse.urlparse(_abs_path, scheme='file').geturl()
+
+    def check_commands_on_units(self, commands, sentry_units):
+        """Check that all commands in a list exit zero on all
+        sentry units in a list.
+
+        :param commands: list of bash commands
+        :param sentry_units: list of sentry unit pointers
+        :returns: None if successful; Failure message otherwise
+        """
+        self.log.debug('Checking exit codes for {} commands on {} '
+                       'sentry units...'.format(len(commands),
+                                                len(sentry_units)))
+        for sentry_unit in sentry_units:
+            for cmd in commands:
+                output, code = sentry_unit.run(cmd)
+                if code == 0:
+                    self.log.debug('{} `{}` returned {} '
+                                   '(OK)'.format(sentry_unit.info['unit_name'],
+                                                 cmd, code))
+                else:
+                    return ('{} `{}` returned {} '
+                            '{}'.format(sentry_unit.info['unit_name'],
+                                        cmd, code, output))
+        return None
+
+    def get_process_id_list(self, sentry_unit, process_name):
+        """Get a list of process ID(s) from a single sentry juju unit
+        for a single process name.
+
+        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :param process_name: Process name
+        :returns: List of process IDs
+        """
+        cmd = 'pidof {}'.format(process_name)
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        return str(output).split()
+
+    def get_unit_process_ids(self, unit_processes):
+        """Construct a dict containing unit sentries, process names, and
+        process IDs."""
+        pid_dict = {}
+        for sentry_unit, process_list in unit_processes.iteritems():
+            pid_dict[sentry_unit] = {}
+            for process in process_list:
+                pids = self.get_process_id_list(sentry_unit, process)
+                pid_dict[sentry_unit].update({process: pids})
+        return pid_dict
+
+    def validate_unit_process_ids(self, expected, actual):
+        """Validate process id quantities for services on units."""
+        self.log.debug('Checking units for running processes...')
+        self.log.debug('Expected PIDs: {}'.format(expected))
+        self.log.debug('Actual PIDs: {}'.format(actual))
+
+        if len(actual) != len(expected):
+            return ('Unit count mismatch. expected, actual: {}, '
+                    '{} '.format(len(expected), len(actual)))
+
+        for (e_sentry, e_proc_names) in expected.iteritems():
+            e_sentry_name = e_sentry.info['unit_name']
+            if e_sentry in actual.keys():
+                a_proc_names = actual[e_sentry]
+            else:
+                return ('Expected sentry ({}) not found in actual dict data.'
+                        '{}'.format(e_sentry_name, e_sentry))
+
+            if len(e_proc_names.keys()) != len(a_proc_names.keys()):
+                return ('Process name count mismatch. expected, actual: {}, '
+                        '{}'.format(len(expected), len(actual)))
+
+            for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
+                    zip(e_proc_names.items(), a_proc_names.items()):
+                if e_proc_name != a_proc_name:
+                    return ('Process name mismatch. expected, actual: {}, '
+                            '{}'.format(e_proc_name, a_proc_name))
+
+                a_pids_length = len(a_pids)
+                if e_pids_length != a_pids_length:
+                    return ('PID count mismatch. {} ({}) expected, actual: '
+                            '{}, {} ({})'.format(e_sentry_name, e_proc_name,
+                                                 e_pids_length, a_pids_length,
+                                                 a_pids))
+                else:
+                    self.log.debug('PID check OK: {} {} {}: '
+                                   '{}'.format(e_sentry_name, e_proc_name,
+                                               e_pids_length, a_pids))
+        return None
+
+    def validate_list_of_identical_dicts(self, list_of_dicts):
+        """Check that all dicts within a list are identical."""
+        hashes = []
+        for _dict in list_of_dicts:
+            hashes.append(hash(frozenset(_dict.items())))
+
+        self.log.debug('Hashes: {}'.format(hashes))
+        if len(set(hashes)) == 1:
+            self.log.debug('Dicts within list are identical')
+        else:
+            return 'Dicts within list are not identical'
+
+        return None
@ -79,9 +79,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||||||
services.append(this_service)
|
services.append(this_service)
|
||||||
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
||||||
'ceph-osd', 'ceph-radosgw']
|
'ceph-osd', 'ceph-radosgw']
|
||||||
# Openstack subordinate charms do not expose an origin option as that
|
# Most OpenStack subordinate charms do not expose an origin option
|
||||||
# is controlled by the principle
|
# as that is controlled by the principle.
|
||||||
ignore = ['neutron-openvswitch']
|
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
|
||||||
|
|
||||||
if self.openstack:
|
if self.openstack:
|
||||||
for svc in services:
|
for svc in services:
|
||||||
@ -148,3 +148,36 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||||||
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
||||||
else:
|
else:
|
||||||
return releases[self.series]
|
return releases[self.series]
|
||||||
|
|
||||||
|
def get_ceph_expected_pools(self, radosgw=False):
|
||||||
|
"""Return a list of expected ceph pools in a ceph + cinder + glance
|
||||||
|
test scenario, based on OpenStack release and whether ceph radosgw
|
||||||
|
is flagged as present or not."""
|
||||||
|
|
||||||
|
if self._get_openstack_release() >= self.trusty_kilo:
|
||||||
|
# Kilo or later
|
||||||
|
pools = [
|
||||||
|
'rbd',
|
||||||
|
'cinder',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
# Juno or earlier
|
||||||
|
pools = [
|
||||||
|
'data',
|
||||||
|
'metadata',
|
||||||
|
'rbd',
|
||||||
|
'cinder',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
|
||||||
|
if radosgw:
|
||||||
|
pools.extend([
|
||||||
|
'.rgw.root',
|
||||||
|
'.rgw.control',
|
||||||
|
'.rgw',
|
||||||
|
'.rgw.gc',
|
||||||
|
'.users.uid'
|
||||||
|
])
|
||||||
|
|
||||||
|
return pools
|
||||||
|
@ -14,16 +14,20 @@
|
|||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import amulet
|
||||||
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
import six
|
import six
|
||||||
import time
|
import time
|
||||||
import urllib
|
import urllib
|
||||||
|
|
||||||
|
import cinderclient.v1.client as cinder_client
|
||||||
import glanceclient.v1.client as glance_client
|
import glanceclient.v1.client as glance_client
|
||||||
import heatclient.v1.client as heat_client
|
import heatclient.v1.client as heat_client
|
||||||
import keystoneclient.v2_0 as keystone_client
|
import keystoneclient.v2_0 as keystone_client
|
||||||
import novaclient.v1_1.client as nova_client
|
import novaclient.v1_1.client as nova_client
|
||||||
|
import swiftclient
|
||||||
|
|
||||||
from charmhelpers.contrib.amulet.utils import (
|
from charmhelpers.contrib.amulet.utils import (
|
||||||
AmuletUtils
|
AmuletUtils
|
||||||
@ -171,6 +175,16 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.debug('Checking if tenant exists ({})...'.format(tenant))
         return tenant in [t.name for t in keystone.tenants.list()]
 
+    def authenticate_cinder_admin(self, keystone_sentry, username,
+                                  password, tenant):
+        """Authenticates admin user with cinder."""
+        # NOTE(beisner): cinder python client doesn't accept tokens.
+        service_ip = \
+            keystone_sentry.relation('shared-db',
+                                     'mysql:shared-db')['private-address']
+        ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
+        return cinder_client.Client(username, password, tenant, ept)
+
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant):
         """Authenticates admin user with the keystone admin endpoint."""
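A rough usage sketch for the new cinder helper (not part of the sync); `u` is assumed to be an OpenStackAmuletUtils instance and `self.d` an amulet Deployment with a keystone unit.

    # Hypothetical snippet: obtain an admin cinder client and list volumes.
    keystone_sentry = self.d.sentry.unit['keystone/0']
    cinder = u.authenticate_cinder_admin(keystone_sentry, username='admin',
                                         password='openstack', tenant='admin')
    print([v.display_name for v in cinder.volumes.list()])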
@ -212,9 +226,29 @@ class OpenStackAmuletUtils(AmuletUtils):
         return nova_client.Client(username=user, api_key=password,
                                   project_id=tenant, auth_url=ep)
 
+    def authenticate_swift_user(self, keystone, user, password, tenant):
+        """Authenticates a regular user with swift api."""
+        self.log.debug('Authenticating swift user ({})...'.format(user))
+        ep = keystone.service_catalog.url_for(service_type='identity',
+                                              endpoint_type='publicURL')
+        return swiftclient.Connection(authurl=ep,
+                                      user=user,
+                                      key=password,
+                                      tenant_name=tenant,
+                                      auth_version='2.0')
+
     def create_cirros_image(self, glance, image_name):
-        """Download the latest cirros image and upload it to glance."""
-        self.log.debug('Creating glance image ({})...'.format(image_name))
+        """Download the latest cirros image and upload it to glance,
+        validate and return a resource pointer.
+
+        :param glance: pointer to authenticated glance connection
+        :param image_name: display name for new image
+        :returns: glance image pointer
+        """
+        self.log.debug('Creating glance cirros image '
+                       '({})...'.format(image_name))
+
+        # Download cirros image
         http_proxy = os.getenv('AMULET_HTTP_PROXY')
         self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
         if http_proxy:
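An illustrative sketch of the swift helper (assumed names only): given a keystone client authenticated as the end user, for instance from the existing authenticate_keystone_user helper, the new method returns a swiftclient Connection.

    # Hypothetical snippet: authenticate against swift and list containers.
    keystone = u.authenticate_keystone_user(keystone_admin, user='demo',
                                            password='pass', tenant='demo')
    swift = u.authenticate_swift_user(keystone, user='demo',
                                      password='pass', tenant='demo')
    headers, containers = swift.get_account()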
@ -223,33 +257,51 @@ class OpenStackAmuletUtils(AmuletUtils):
         else:
             opener = urllib.FancyURLopener()
 
-        f = opener.open("http://download.cirros-cloud.net/version/released")
+        f = opener.open('http://download.cirros-cloud.net/version/released')
         version = f.read().strip()
-        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
+        cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
         local_path = os.path.join('tests', cirros_img)
 
         if not os.path.exists(local_path):
-            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
+            cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
                                                   version, cirros_img)
             opener.retrieve(cirros_url, local_path)
         f.close()
 
+        # Create glance image
         with open(local_path) as f:
             image = glance.images.create(name=image_name, is_public=True,
                                          disk_format='qcow2',
                                          container_format='bare', data=f)
-        count = 1
-        status = image.status
-        while status != 'active' and count < 10:
-            time.sleep(3)
-            image = glance.images.get(image.id)
-            status = image.status
-            self.log.debug('image status: {}'.format(status))
-            count += 1
 
-        if status != 'active':
-            self.log.error('image creation timed out')
-            return None
+        # Wait for image to reach active status
+        img_id = image.id
+        ret = self.resource_reaches_status(glance.images, img_id,
+                                           expected_stat='active',
+                                           msg='Image status wait')
+        if not ret:
+            msg = 'Glance image failed to reach expected state.'
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new image
+        self.log.debug('Validating image attributes...')
+        val_img_name = glance.images.get(img_id).name
+        val_img_stat = glance.images.get(img_id).status
+        val_img_pub = glance.images.get(img_id).is_public
+        val_img_cfmt = glance.images.get(img_id).container_format
+        val_img_dfmt = glance.images.get(img_id).disk_format
+        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+                    'container fmt:{} disk fmt:{}'.format(
+                        val_img_name, val_img_pub, img_id,
+                        val_img_stat, val_img_cfmt, val_img_dfmt))
+
+        if val_img_name == image_name and val_img_stat == 'active' \
+                and val_img_pub is True and val_img_cfmt == 'bare' \
+                and val_img_dfmt == 'qcow2':
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Volume validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
 
         return image
 
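The hand-rolled polling loop is replaced by the shared resource_reaches_status / raise_status pattern. A minimal usage sketch (illustrative; `glance` and `nova` are assumed to be authenticated clients, `u` an OpenStackAmuletUtils instance):

    # Hypothetical snippet: upload a cirros image, then boot an instance from it.
    image = u.create_cirros_image(glance, 'cirros-image-1')
    instance = u.create_instance(nova, 'cirros-image-1', 'cirros-instance-1',
                                 'm1.tiny')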
@ -260,22 +312,7 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.warn('/!\\ DEPRECATION WARNING: use '
                       'delete_resource instead of delete_image.')
         self.log.debug('Deleting glance image ({})...'.format(image))
-        num_before = len(list(glance.images.list()))
-        glance.images.delete(image)
-
-        count = 1
-        num_after = len(list(glance.images.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(glance.images.list()))
-            self.log.debug('number of images: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('image deletion timed out')
-            return False
-
-        return True
+        return self.delete_resource(glance.images, image, msg='glance image')
 
     def create_instance(self, nova, image_name, instance_name, flavor):
         """Create the specified instance."""
@ -308,22 +345,8 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.warn('/!\\ DEPRECATION WARNING: use '
                       'delete_resource instead of delete_instance.')
         self.log.debug('Deleting instance ({})...'.format(instance))
-        num_before = len(list(nova.servers.list()))
-        nova.servers.delete(instance)
-
-        count = 1
-        num_after = len(list(nova.servers.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(nova.servers.list()))
-            self.log.debug('number of instances: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('instance deletion timed out')
-            return False
-
-        return True
+        return self.delete_resource(nova.servers, instance,
+                                    msg='nova instance')
 
     def create_or_get_keypair(self, nova, keypair_name="testkey"):
         """Create a new keypair, or return pointer if it already exists."""
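Both deprecated wrappers now reduce to delete_resource, which can also be called directly for any manager exposing list() and delete(). A short sketch (client handles `glance`, `nova`, `cinder` and the objects passed in are assumptions):

    # Hypothetical snippet: delete resources directly via delete_resource.
    u.delete_resource(glance.images, image, msg='glance image')
    u.delete_resource(nova.servers, instance, msg='nova instance')
    u.delete_resource(cinder.volumes, vol.id, msg='cinder volume', max_wait=300)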
@ -339,6 +362,88 @@ class OpenStackAmuletUtils(AmuletUtils):
         _keypair = nova.keypairs.create(name=keypair_name)
         return _keypair
 
+    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
+                             img_id=None, src_vol_id=None, snap_id=None):
+        """Create cinder volume, optionally from a glance image, OR
+        optionally as a clone of an existing volume, OR optionally
+        from a snapshot. Wait for the new volume status to reach
+        the expected status, validate and return a resource pointer.
+
+        :param vol_name: cinder volume display name
+        :param vol_size: size in gigabytes
+        :param img_id: optional glance image id
+        :param src_vol_id: optional source volume id to clone
+        :param snap_id: optional snapshot id to use
+        :returns: cinder volume pointer
+        """
+        # Handle parameter input and avoid impossible combinations
+        if img_id and not src_vol_id and not snap_id:
+            # Create volume from image
+            self.log.debug('Creating cinder volume from glance image...')
+            bootable = 'true'
+        elif src_vol_id and not img_id and not snap_id:
+            # Clone an existing volume
+            self.log.debug('Cloning cinder volume...')
+            bootable = cinder.volumes.get(src_vol_id).bootable
+        elif snap_id and not src_vol_id and not img_id:
+            # Create volume from snapshot
+            self.log.debug('Creating cinder volume from snapshot...')
+            snap = cinder.volume_snapshots.find(id=snap_id)
+            vol_size = snap.size
+            snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
+            bootable = cinder.volumes.get(snap_vol_id).bootable
+        elif not img_id and not src_vol_id and not snap_id:
+            # Create volume
+            self.log.debug('Creating cinder volume...')
+            bootable = 'false'
+        else:
+            # Impossible combination of parameters
+            msg = ('Invalid method use - name:{} size:{} img_id:{} '
+                   'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
+                                                     img_id, src_vol_id,
+                                                     snap_id))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Create new volume
+        try:
+            vol_new = cinder.volumes.create(display_name=vol_name,
+                                            imageRef=img_id,
+                                            size=vol_size,
+                                            source_volid=src_vol_id,
+                                            snapshot_id=snap_id)
+            vol_id = vol_new.id
+        except Exception as e:
+            msg = 'Failed to create volume: {}'.format(e)
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Wait for volume to reach available status
+        ret = self.resource_reaches_status(cinder.volumes, vol_id,
+                                           expected_stat="available",
+                                           msg="Volume status wait")
+        if not ret:
+            msg = 'Cinder volume failed to reach expected state.'
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new volume
+        self.log.debug('Validating volume attributes...')
+        val_vol_name = cinder.volumes.get(vol_id).display_name
+        val_vol_boot = cinder.volumes.get(vol_id).bootable
+        val_vol_stat = cinder.volumes.get(vol_id).status
+        val_vol_size = cinder.volumes.get(vol_id).size
+        msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
+                    '{} size:{}'.format(val_vol_name, vol_id,
+                                        val_vol_stat, val_vol_boot,
+                                        val_vol_size))
+
+        if val_vol_boot == bootable and val_vol_stat == 'available' \
+                and val_vol_name == vol_name and val_vol_size == vol_size:
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Volume validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        return vol_new
+
     def delete_resource(self, resource, resource_id,
                         msg="resource", max_wait=120):
         """Delete one openstack resource, such as one instance, keypair,
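A usage sketch for the new volume helper (illustrative, not part of the sync; `cinder` is an authenticated cinder client and `image` a glance image, both assumed):

    # Hypothetical snippet: create a bootable volume from an image, then clone it.
    vol = u.create_cinder_volume(cinder, vol_name='demo-vol', vol_size=1,
                                 img_id=image.id)
    clone = u.create_cinder_volume(cinder, vol_name='demo-vol-clone',
                                   src_vol_id=vol.id)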
@ -350,6 +455,8 @@ class OpenStackAmuletUtils(AmuletUtils):
         :param max_wait: maximum wait time in seconds
         :returns: True if successful, otherwise False
         """
+        self.log.debug('Deleting OpenStack resource '
+                       '{} ({})'.format(resource_id, msg))
         num_before = len(list(resource.list()))
         resource.delete(resource_id)
 
@ -411,3 +518,87 @@ class OpenStackAmuletUtils(AmuletUtils):
             self.log.debug('{} never reached expected status: '
                            '{}'.format(resource_id, expected_stat))
             return False
+
+    def get_ceph_osd_id_cmd(self, index):
+        """Produce a shell command that will return a ceph-osd id."""
+        return ("`initctl list | grep 'ceph-osd ' | "
+                "awk 'NR=={} {{ print $2 }}' | "
+                "grep -o '[0-9]*'`".format(index + 1))
+
+    def get_ceph_pools(self, sentry_unit):
+        """Return a dict of ceph pools from a single ceph unit, with
+        pool name as keys, pool id as vals."""
+        pools = {}
+        cmd = 'sudo ceph osd lspools'
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
+        for pool in str(output).split(','):
+            pool_id_name = pool.split(' ')
+            if len(pool_id_name) == 2:
+                pool_id = pool_id_name[0]
+                pool_name = pool_id_name[1]
+                pools[pool_name] = int(pool_id)
+
+        self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
+                                                pools))
+        return pools
+
+    def get_ceph_df(self, sentry_unit):
+        """Return dict of ceph df json output, including ceph pool state.
+
+        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :returns: Dict of ceph df output
+        """
+        cmd = 'sudo ceph df --format=json'
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        return json.loads(output)
+
+    def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
+        """Take a sample of attributes of a ceph pool, returning ceph
+        pool name, object count and disk space used for the specified
+        pool ID number.
+
+        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :param pool_id: Ceph pool ID
+        :returns: List of pool name, object count, kb disk space used
+        """
+        df = self.get_ceph_df(sentry_unit)
+        pool_name = df['pools'][pool_id]['name']
+        obj_count = df['pools'][pool_id]['stats']['objects']
+        kb_used = df['pools'][pool_id]['stats']['kb_used']
+        self.log.debug('Ceph {} pool (ID {}): {} objects, '
+                       '{} kb used'.format(pool_name, pool_id,
+                                           obj_count, kb_used))
+        return pool_name, obj_count, kb_used
+
+    def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
+        """Validate ceph pool samples taken over time, such as pool
+        object counts or pool kb used, before adding, after adding, and
+        after deleting items which affect those pool attributes. The
+        2nd element is expected to be greater than the 1st; 3rd is expected
+        to be less than the 2nd.
+
+        :param samples: List containing 3 data samples
+        :param sample_type: String for logging and usage context
+        :returns: None if successful, Failure message otherwise
+        """
+        original, created, deleted = range(3)
+        if samples[created] <= samples[original] or \
+                samples[deleted] >= samples[created]:
+            return ('Ceph {} samples ({}) '
+                    'unexpected.'.format(sample_type, samples))
+        else:
+            self.log.debug('Ceph {} samples (OK): '
+                           '{}'.format(sample_type, samples))
+            return None
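The ceph helpers are intended to be used together: look up a pool id, take object-count (or kb_used) samples before and after operations that touch the pool, then validate the trend. A sketch under assumed names (`ceph0_sentry`, `cinder`, `image`, `u`), illustrative only:

    # Hypothetical snippet: sample the cinder pool around volume create/delete.
    pool_id = u.get_ceph_pools(ceph0_sentry)['cinder']

    samples = [u.get_ceph_pool_sample(ceph0_sentry, pool_id)[1]]
    vol = u.create_cinder_volume(cinder, vol_name='sample-vol', img_id=image.id)
    samples.append(u.get_ceph_pool_sample(ceph0_sentry, pool_id)[1])
    u.delete_resource(cinder.volumes, vol.id, msg='cinder volume')
    samples.append(u.get_ceph_pool_sample(ceph0_sentry, pool_id)[1])

    ret = u.validate_ceph_pool_samples(samples, 'cinder pool object count')
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)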