Remove bundled in-tree Tempest plugin

Also use the newly created blazar-tempest-plugin in Zuul jobs.

Depends-On: I32f2ef3ddbb30da8061ccc035aae0428e6dd5450
Change-Id: Ia03068d8f84d988b470252efe483890f1900f488
Chandan Kumar 2018-01-04 17:29:00 +05:30 committed by Pierre Riteau
parent 9f0bff4bcc
commit e4d30aec1c
17 changed files with 5 additions and 1547 deletions


@ -18,6 +18,7 @@
 - openstack/blazar
 - openstack/blazar-nova
 - openstack/python-blazarclient
+- openstack/blazar-tempest-plugin
 - project:
     name: openstack/blazar


@ -1,5 +0,0 @@
===============================================
Tempest Integration of blazar
===============================================
This directory contains Tempest tests to cover the blazar project.


@ -1,49 +0,0 @@
# Copyright 2014 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
service_available_group = cfg.OptGroup(name="service_available",
title="Available OpenStack Services")
service_option = [
cfg.BoolOpt("climate",
default=True,
help="Whether or not climate is expected to be available. "
"This config remains for backward compatibility."),
cfg.BoolOpt("blazar",
default=True,
help="Whether or not blazar is expected to be available"),
]
resource_reservation_group = cfg.OptGroup(name='resource_reservation',
title='Resource reservation service '
'options')
ResourceReservationGroup = [
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the resource_reservation "
"service."),
cfg.IntOpt('lease_interval',
default=10,
help="Time in seconds between lease status checks."),
cfg.IntOpt('lease_end_timeout',
default=300,
help="Timeout in seconds to wait for a lease to finish.")
]
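
For reference, a minimal sketch of how a test could read the options registered above once the plugin is loaded; the group and option names come from this file, while the helper function and its name are purely illustrative:

    from tempest import config

    CONF = config.CONF

    def lease_wait_settings():
        # Both options belong to the [resource_reservation] group defined above.
        interval = CONF.resource_reservation.lease_interval    # default: 10 s
        timeout = CONF.resource_reservation.lease_end_timeout  # default: 300 s
        return interval, timeout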


@ -1,47 +0,0 @@
# Copyright 2015
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest import config
from tempest.test_discover import plugins
from blazar_tempest_plugin import config as blazar_config
class BlazarTempestPlugin(plugins.TempestPlugin):
def load_tests(self):
base_path = os.path.split(os.path.dirname(
os.path.abspath(__file__)))[0]
test_dir = "blazar_tempest_plugin/tests"
full_test_dir = os.path.join(base_path, test_dir)
return full_test_dir, base_path
def register_opts(self, conf):
config.register_opt_group(conf,
blazar_config.service_available_group,
blazar_config.service_option)
config.register_opt_group(conf,
blazar_config.resource_reservation_group,
blazar_config.ResourceReservationGroup)
def get_opt_lists(self):
return [
(blazar_config.service_available_group.name,
blazar_config.service_option),
(blazar_config.resource_reservation_group.name,
blazar_config.ResourceReservationGroup)
]
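
Tempest only loads the class above if it is advertised through the tempest.test_plugins entry point (the setup.cfg stanza removed at the bottom of this commit). As a hedged sketch, the stevedore library that tempest itself relies on can be used to confirm the plugin is discoverable once the package is installed:

    from stevedore import extension

    # Lists every plugin registered under tempest's plugin namespace; the
    # 'blazar_tests' entry is only expected when the plugin package is installed.
    mgr = extension.ExtensionManager(namespace='tempest.test_plugins')
    print(sorted(mgr.names()))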


@ -1,77 +0,0 @@
# Copyright 2017 NTT
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.lib.common import rest_client
class ResourceReservationV1Client(rest_client.RestClient):
"""Client class for accessing the resource reservation API."""
CLIMATECLIENT_VERSION = '1'
lease = '/leases'
lease_path = '/leases/%s'
host = '/os-hosts'
host_path = '/os-hosts/%s'
def _response_helper(self, resp, body=None):
if body:
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_lease(self):
resp, body = self.get(self.lease)
return self._response_helper(resp, body)
def get_lease(self, lease):
resp, body = self.get(self.lease_path % lease)
return self._response_helper(resp, body)
def create_lease(self, body):
body = json.dumps(body)
resp, body = self.post(self.lease, body=body)
return self._response_helper(resp, body)
def update_lease(self, lease, body):
body = json.dumps(body)
resp, body = self.put(self.lease_path % lease, body=body)
return self._response_helper(resp, body)
def delete_lease(self, lease):
resp, body = self.delete(self.lease_path % lease)
return self._response_helper(resp, body)
def list_host(self):
resp, body = self.get(self.host)
return self._response_helper(resp, body)
def get_host(self, host):
resp, body = self.get(self.host_path % host)
return self._response_helper(resp, body)
def create_host(self, body):
body = json.dumps(body)
resp, body = self.post(self.host, body=body)
return self._response_helper(resp, body)
def update_host(self, host, body):
body = json.dumps(body)
resp, body = self.put(self.host_path % host, body=body)
return self._response_helper(resp, body)
def delete_host(self, host):
resp, body = self.delete(self.host_path % host)
return self._response_helper(resp, body)
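
A hedged usage sketch for the client above: the auth_provider argument and region value are placeholders normally supplied by tempest's credential machinery (see the scenario base class later in this commit), and the 'leases' key assumes the Blazar API wraps list responses that way:

    from blazar_tempest_plugin.services.reservation import reservation_client

    def lease_names(auth_provider, region='RegionOne'):
        client = reservation_client.ResourceReservationV1Client(
            auth_provider, 'reservation', region)
        # GET /leases, then pull the lease names out of the response body.
        leases = client.list_lease().get('leases', [])
        return [lease['name'] for lease in leases]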


@ -1,686 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
from oslo_log import log
from oslo_serialization import jsonutils as json
from tempest.common import compute
from tempest.common import image as common_image
from tempest.common.utils.linux import remote_client
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
@classmethod
def setup_clients(cls):
super(ScenarioTest, cls).setup_clients()
# Clients (in alphabetical order)
cls.flavors_client = cls.os_primary.flavors_client
cls.compute_floating_ips_client = (
cls.os_primary.compute_floating_ips_client)
if CONF.service_available.glance:
# Check if glance v1 is available to determine which client to use.
if CONF.image_feature_enabled.api_v1:
cls.image_client = cls.os_primary.image_client
elif CONF.image_feature_enabled.api_v2:
cls.image_client = cls.os_primary.image_client_v2
else:
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
# Compute image client
cls.compute_images_client = cls.os_primary.compute_images_client
cls.keypairs_client = cls.os_primary.keypairs_client
# Nova security groups client
cls.compute_security_groups_client = (
cls.os_primary.compute_security_groups_client)
cls.compute_security_group_rules_client = (
cls.os_primary.compute_security_group_rules_client)
cls.servers_client = cls.os_primary.servers_client
cls.interface_client = cls.os_primary.interfaces_client
# Neutron network client
cls.networks_client = cls.os_primary.networks_client
cls.ports_client = cls.os_primary.ports_client
cls.routers_client = cls.os_primary.routers_client
cls.subnets_client = cls.os_primary.subnets_client
cls.floating_ips_client = cls.os_primary.floating_ips_client
cls.security_groups_client = cls.os_primary.security_groups_client
cls.security_group_rules_client = (
cls.os_primary.security_group_rules_client)
if CONF.volume_feature_enabled.api_v2:
cls.volumes_client = cls.os_primary.volumes_v2_client
cls.snapshots_client = cls.os_primary.snapshots_v2_client
else:
cls.volumes_client = cls.os_primary.volumes_client
cls.snapshots_client = cls.os_primary.snapshots_client
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
def _create_port(self, network_id, client=None, namestart='port-quotatest',
**kwargs):
if not client:
client = self.ports_client
name = data_utils.rand_name(namestart)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
self.assertIsNotNone(result, 'Unable to allocate port')
port = result['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_port, port['id'])
return port
def create_keypair(self, client=None):
if not client:
client = self.keypairs_client
name = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
body = client.create_keypair(name=name)
self.addCleanup(client.delete_keypair, name)
return body['keypair']
def create_server(self, name=None, image_id=None, flavor=None,
validatable=False, wait_until='ACTIVE',
clients=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
returns a test server. The purpose of this wrapper is to minimize
the impact on the code of the tests already using this
function.
"""
# NOTE(jlanoux): As a first step, ssh checks in the scenario
# tests need to be run regardless of the run_validation and
# validatable parameters and thus until the ssh validation job
# becomes voting in CI. The test resources management and IP
# association are taken care of in the scenario tests.
# Therefore, the validatable parameter is set to false in all
# those tests. In this way create_server just returns a standard
# server and the scenario tests always perform ssh checks.
# Needed for the cross_tenant_traffic test:
if clients is None:
clients = self.os_primary
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-server")
vnic_type = CONF.network.port_vnic_type
# If vnic_type is configured create port for
# every network
if vnic_type:
ports = []
create_port_body = {'binding:vnic_type': vnic_type,
'namestart': 'port-smoke'}
if kwargs:
# Convert security group names to security group ids
# to pass to create_port
if 'security_groups' in kwargs:
security_groups = \
clients.security_groups_client.list_security_groups(
).get('security_groups')
sec_dict = dict([(s['name'], s['id'])
for s in security_groups])
sec_groups_names = [s['name'] for s in kwargs.pop(
'security_groups')]
security_groups_ids = [sec_dict[s]
for s in sec_groups_names]
if security_groups_ids:
create_port_body[
'security_groups'] = security_groups_ids
networks = kwargs.pop('networks', [])
else:
networks = []
# If there are no networks passed to us we look up
# for the project's private networks and create a port.
# The same behaviour as we would expect when passing
# the call to the clients with no networks
if not networks:
networks = clients.networks_client.list_networks(
**{'router:external': False, 'fields': 'id'})['networks']
# It's net['uuid'] if networks come from kwargs
# and net['id'] if they come from
# clients.networks_client.list_networks
for net in networks:
net_id = net.get('uuid', net.get('id'))
if 'port' not in net:
port = self._create_port(network_id=net_id,
client=clients.ports_client,
**create_port_body)
ports.append({'port': port['id']})
else:
ports.append({'port': net['port']})
if ports:
kwargs['networks'] = ports
self.ports = ports
tenant_network = self.get_tenant_network()
body, servers = compute.create_test_server(
clients,
tenant_network=tenant_network,
wait_until=wait_until,
name=name, flavor=flavor,
image_id=image_id, **kwargs)
self.addCleanup(waiters.wait_for_server_termination,
clients.servers_client, body['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
clients.servers_client.delete_server, body['id'])
server = clients.servers_client.show_server(body['id'])['server']
return server
def create_volume(self, size=None, name=None, snapshot_id=None,
imageRef=None, volume_type=None):
if size is None:
size = CONF.volume.volume_size
if imageRef:
image = self.compute_images_client.show_image(imageRef)['image']
min_disk = image.get('minDisk')
size = max(size, min_disk)
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-volume")
kwargs = {'display_name': name,
'snapshot_id': snapshot_id,
'imageRef': imageRef,
'volume_type': volume_type,
'size': size}
volume = self.volumes_client.create_volume(**kwargs)['volume']
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
volume['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.volumes_client.delete_volume, volume['id'])
# NOTE(e0ne): Cinder API v2 uses name instead of display_name
if 'display_name' in volume:
self.assertEqual(name, volume['display_name'])
else:
self.assertEqual(name, volume['name'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
# The volume retrieved on creation has a non-up-to-date status.
# Retrieval after it becomes active ensures correct details.
volume = self.volumes_client.show_volume(volume['id'])['volume']
return volume
def create_volume_type(self, client=None, name=None, backend_name=None):
if not client:
client = self.admin_volume_types_client
if not name:
class_name = self.__class__.__name__
name = data_utils.rand_name(class_name + '-volume-type')
randomized_name = data_utils.rand_name('scenario-type-' + name)
LOG.debug("Creating a volume type: %s on backend %s",
randomized_name, backend_name)
extra_specs = {}
if backend_name:
extra_specs = {"volume_backend_name": backend_name}
body = client.create_volume_type(name=randomized_name,
extra_specs=extra_specs)
volume_type = body['volume_type']
self.assertIn('id', volume_type)
self.addCleanup(client.delete_volume_type, volume_type['id'])
return volume_type
def _create_loginable_secgroup_rule(self, secgroup_id=None):
_client = self.compute_security_groups_client
_client_rules = self.compute_security_group_rules_client
if secgroup_id is None:
sgs = _client.list_security_groups()['security_groups']
for sg in sgs:
if sg['name'] == 'default':
secgroup_id = sg['id']
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security group.
rulesets = [
{
# ssh
'ip_protocol': 'tcp',
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
},
{
# ping
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
}
]
rules = list()
for ruleset in rulesets:
sg_rule = _client_rules.create_security_group_rule(
parent_group_id=secgroup_id, **ruleset)['security_group_rule']
rules.append(sg_rule)
return rules
def _create_security_group(self):
# Create security group
sg_name = data_utils.rand_name(self.__class__.__name__)
sg_desc = sg_name + " description"
secgroup = self.compute_security_groups_client.create_security_group(
name=sg_name, description=sg_desc)['security_group']
self.assertEqual(secgroup['name'], sg_name)
self.assertEqual(secgroup['description'], sg_desc)
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
self.compute_security_groups_client.delete_security_group,
secgroup['id'])
# Add rules to the security group
self._create_loginable_secgroup_rule(secgroup['id'])
return secgroup
def get_remote_client(self, ip_address, username=None, private_key=None):
"""Get a SSH client to a remote server
@param ip_address the server floating or fixed IP address to use
for ssh validation
@param username name of the Linux account on the remote server
@param private_key the SSH private key to use
@return a RemoteClient object
"""
if username is None:
username = CONF.validation.image_ssh_user
# Set this with 'keypair' or others to log in with keypair or
# username/password.
if CONF.validation.auth_method == 'keypair':
password = None
if private_key is None:
private_key = self.keypair['private_key']
else:
password = CONF.validation.image_ssh_password
private_key = None
linux_client = remote_client.RemoteClient(ip_address, username,
pkey=private_key,
password=password)
try:
linux_client.validate_authentication()
except Exception as e:
message = ('Initializing SSH connection to %(ip)s failed. '
'Error: %(error)s' % {'ip': ip_address,
'error': e})
caller = test_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
LOG.exception(message)
self._log_console_output()
raise
return linux_client
def _image_create(self, name, fmt, path,
disk_format=None, properties=None):
if properties is None:
properties = {}
name = data_utils.rand_name('%s-' % name)
params = {
'name': name,
'container_format': fmt,
'disk_format': disk_format or fmt,
}
if CONF.image_feature_enabled.api_v1:
params['is_public'] = 'False'
params['properties'] = properties
params = {'headers': common_image.image_meta_to_headers(**params)}
else:
params['visibility'] = 'private'
# Additional properties are flattened out in the v2 API.
params.update(properties)
body = self.image_client.create_image(**params)
image = body['image'] if 'image' in body else body
self.addCleanup(self.image_client.delete_image, image['id'])
self.assertEqual("queued", image['status'])
with open(path, 'rb') as image_file:
if CONF.image_feature_enabled.api_v1:
self.image_client.update_image(image['id'], data=image_file)
else:
self.image_client.store_image_file(image['id'], image_file)
return image['id']
def glance_image_create(self):
img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
img_container_format = CONF.scenario.img_container_format
img_disk_format = CONF.scenario.img_disk_format
img_properties = CONF.scenario.img_properties
LOG.debug("paths: img: %s, container_format: %s, disk_format: %s, "
"properties: %s, ami: %s, ari: %s, aki: %s",
img_path, img_container_format, img_disk_format,
img_properties, ami_img_path, ari_img_path, aki_img_path)
try:
image = self._image_create('scenario-img',
img_container_format,
img_path,
disk_format=img_disk_format,
properties=img_properties)
except IOError:
LOG.debug("A qcow2 image was not found. Try to get a uec image.")
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
image = self._image_create('scenario-ami', 'ami',
path=ami_img_path,
properties=properties)
LOG.debug("image:%s", image)
return image
def _log_console_output(self, servers=None):
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
return
if not servers:
servers = self.servers_client.list_servers()
servers = servers['servers']
for server in servers:
try:
console_output = self.servers_client.get_console_output(
server['id'])['output']
LOG.debug('Console output for %s\nbody=\n%s',
server['id'], console_output)
except lib_exc.NotFound:
LOG.debug("Server %s disappeared(deleted) while looking "
"for the console log", server['id'])
def _log_net_info(self, exc):
# network debug is called as part of ssh init
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
def create_server_snapshot(self, server, name=None):
# Glance client
_image_client = self.image_client
# Compute client
_images_client = self.compute_images_client
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
LOG.debug("Creating a snapshot image for server: %s", server['name'])
image = _images_client.create_image(server['id'], name=name)
image_id = image.response['location'].split('images/')[1]
waiters.wait_for_image_status(_image_client, image_id, 'active')
self.addCleanup(_image_client.wait_for_resource_deletion,
image_id)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
_image_client.delete_image, image_id)
if CONF.image_feature_enabled.api_v1:
# In glance v1 the additional properties are stored in the headers.
resp = _image_client.check_image(image_id)
snapshot_image = common_image.get_image_meta_from_headers(resp)
image_props = snapshot_image.get('properties', {})
else:
# In glance v2 the additional properties are flattened.
snapshot_image = _image_client.show_image(image_id)
image_props = snapshot_image
bdm = image_props.get('block_device_mapping')
if bdm:
bdm = json.loads(bdm)
if bdm and 'snapshot_id' in bdm[0]:
snapshot_id = bdm[0]['snapshot_id']
self.addCleanup(
self.snapshots_client.wait_for_resource_deletion,
snapshot_id)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.snapshots_client.delete_snapshot,
snapshot_id)
waiters.wait_for_volume_resource_status(self.snapshots_client,
snapshot_id,
'available')
image_name = snapshot_image['name']
self.assertEqual(name, image_name)
LOG.debug("Created snapshot image %s for server %s",
image_name, server['name'])
return snapshot_image
def nova_volume_attach(self, server, volume_to_attach):
volume = self.servers_client.attach_volume(
server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
% CONF.compute.volume_device_name)['volumeAttachment']
self.assertEqual(volume_to_attach['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
# Return the updated volume after the attachment
return self.volumes_client.show_volume(volume['id'])['volume']
def nova_volume_detach(self, server, volume):
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
volume = self.volumes_client.show_volume(volume['id'])['volume']
self.assertEqual('available', volume['status'])
def rebuild_server(self, server_id, image=None,
preserve_ephemeral=False, wait=True,
rebuild_kwargs=None):
if image is None:
image = CONF.compute.image_ref
rebuild_kwargs = rebuild_kwargs or {}
LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
server_id, image, preserve_ephemeral)
self.servers_client.rebuild_server(
server_id=server_id, image_ref=image,
preserve_ephemeral=preserve_ephemeral,
**rebuild_kwargs)
if wait:
waiters.wait_for_server_status(self.servers_client,
server_id, 'ACTIVE')
def ping_ip_address(self, ip_address, should_succeed=True,
ping_timeout=None, mtu=None):
timeout = ping_timeout or CONF.validation.ping_timeout
cmd = ['ping', '-c1', '-w1']
if mtu:
cmd += [
# don't fragment
'-M', 'do',
# ping receives just the size of ICMP payload
'-s', str(net_utils.get_ping_payload_size(mtu, 4))
]
cmd.append(ip_address)
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return (proc.returncode == 0) == should_succeed
caller = test_utils.find_test_caller()
LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
' expected result is %(should_succeed)s', {
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'should_succeed':
'reachable' if should_succeed else 'unreachable'
})
result = test_utils.call_until_true(ping, timeout, 1)
LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
'ping result is %(result)s', {
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'result': 'expected' if result else 'unexpected'
})
return result
def check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True,
mtu=None):
"""Check server connectivity
:param ip_address: server to test against
:param username: server's ssh username
:param private_key: server's ssh private key to be used
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
negative - attempt ping and fail if succeed
:param mtu: network MTU to use for connectivity validation
:raises: AssertError if the result of the connectivity check does
not match the value of the should_connect param
"""
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
self.assertTrue(self.ping_ip_address(ip_address,
should_succeed=should_connect,
mtu=mtu),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
self.get_remote_client(ip_address, username, private_key)
def check_public_network_connectivity(self, ip_address, username,
private_key, should_connect=True,
msg=None, servers=None, mtu=None):
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
LOG.debug('checking network connections to IP %s with user: %s',
ip_address, username)
try:
self.check_vm_connectivity(ip_address,
username,
private_key,
should_connect=should_connect,
mtu=mtu)
except Exception:
ex_msg = 'Public network connectivity check failed'
if msg:
ex_msg += ": " + msg
LOG.exception(ex_msg)
self._log_console_output(servers)
raise
def create_floating_ip(self, thing, pool_name=None):
"""Create a floating IP and associates to a server on Nova"""
if not pool_name:
pool_name = CONF.network.floating_network_name
floating_ip = (self.compute_floating_ips_client.
create_floating_ip(pool=pool_name)['floating_ip'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.compute_floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.compute_floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], thing['id'])
return floating_ip
def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
private_key=None):
ssh_client = self.get_remote_client(ip_address,
private_key=private_key)
if dev_name is not None:
ssh_client.make_fs(dev_name)
ssh_client.mount(dev_name, mount_path)
cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
ssh_client.exec_command(cmd_timestamp)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
% mount_path)
if dev_name is not None:
ssh_client.umount(mount_path)
return timestamp
def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
private_key=None):
ssh_client = self.get_remote_client(ip_address,
private_key=private_key)
if dev_name is not None:
ssh_client.mount(dev_name, mount_path)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
% mount_path)
if dev_name is not None:
ssh_client.umount(mount_path)
return timestamp
def get_server_ip(self, server):
"""Get the server fixed or floating IP.
Based on the configuration we're in, return a correct ip
address for validating that a guest is up.
"""
if CONF.validation.connect_method == 'floating':
# The tests calling this method don't have a floating IP
# and can't make use of the validation resources. So the
# method is creating the floating IP there.
return self.create_floating_ip(server)['ip']
elif CONF.validation.connect_method == 'fixed':
# Determine the network name to look for based on config or creds
# provider network resources.
if CONF.validation.network_for_ssh:
addresses = server['addresses'][
CONF.validation.network_for_ssh]
else:
creds_provider = self._get_credentials_provider()
net_creds = creds_provider.get_primary_creds()
network = getattr(net_creds, 'network', None)
addresses = (server['addresses'][network['name']]
if network else [])
for address in addresses:
if (address['version'] == CONF.validation.ip_version_for_ssh
and address['OS-EXT-IPS:type'] == 'fixed'):
return address['addr']
raise exceptions.ServerUnreachable(server_id=server['id'])
else:
raise lib_exc.InvalidConfiguration()
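
To show how the helpers above are meant to be combined, here is a hedged sketch of a scenario test deriving from ScenarioTest; the class and test names are invented for illustration and are not part of the plugin:

    from tempest.lib import decorators

    class TestBootAndPingExample(ScenarioTest):

        @decorators.attr(type='smoke')
        def test_boot_and_ping(self):
            # Boot a server with a fresh keypair, attach a floating IP and
            # verify reachability using the base class helpers.
            self.keypair = self.create_keypair()
            server = self.create_server(key_name=self.keypair['name'])
            floating_ip = self.create_floating_ip(server)
            self.check_vm_connectivity(
                floating_ip['ip'],
                private_key=self.keypair['private_key'])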


@ -1,114 +0,0 @@
# Copyright 2014 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from tempest import clients as tempestclients
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import test_utils
from blazar_tempest_plugin.services.reservation import (
reservation_client as clients)
from blazar_tempest_plugin.tests.scenario import manager_freeze as manager
CONF = config.CONF
LOG = log.getLogger(__name__)
class ResourceReservationScenarioTest(manager.ScenarioTest):
"""Base class for resource reservation scenario tests."""
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(ResourceReservationScenarioTest, cls).setup_clients()
if not (CONF.service_available.climate or
CONF.service_available.blazar):
raise cls.skipException("Resource reservation support is "
"required")
cred_provider = cls._get_credentials_provider()
creds = cred_provider.get_credentials('admin')
auth_prov = tempestclients.get_auth_provider(creds._credentials)
cls.os_admin.resource_reservation_client = (
clients.ResourceReservationV1Client(auth_prov,
'reservation',
CONF.identity.region))
cls.reservation_client = (
cls.os_admin.resource_reservation_client)
def get_lease_by_name(self, lease_name):
# same approach as the blazarclient: ask for the entire list
lease_list = self.reservation_client.list_lease()
named_lease = []
# and then search by lease_name
named_lease = (
filter(lambda lease: lease['name'] == lease_name, lease_list))
if named_lease:
return self.reservation_client.get_lease(
named_lease[0]['id'])
else:
message = "Unable to find lease with name '%s'" % lease_name
raise exceptions.NotFound(message)
def delete_lease(self, lease_id):
return self.reservation_client.delete_lease(lease_id)
def wait_for_lease_end(self, lease_id):
def check_lease_end():
try:
lease = self.reservation_client.get_lease(lease_id)['lease']
if lease:
events = lease['events']
return len(filter(lambda evt:
evt['event_type'] == 'end_lease' and
evt['status'] == 'DONE',
events)) > 0
else:
LOG.info("Lease with id %s is empty", lease_id)
except Exception as e:
LOG.info("Unable to find lease with id %(lease_id)s. "
"Exception: %(message)s",
{'lease_id': lease_id, 'message': e.message})
return True
if not test_utils.call_until_true(
check_lease_end,
CONF.resource_reservation.lease_end_timeout,
CONF.resource_reservation.lease_interval):
message = "Timed out waiting for lease to change status to DONE"
raise exceptions.TimeoutException(message)
def remove_image_snapshot(self, image_name):
try:
image = filter(lambda i:
i['name'] == image_name,
self.image_client.list())
self.image_client.delete(image)
except Exception as e:
LOG.info("Unable to delete %(image_name)s snapshot. "
"Exception: %(message)s",
{'image_name': image_name, 'message': e.message})
def is_flavor_enough(self, flavor_id, image_id):
image = self.compute_images_client.show_image(image_id)['image']
flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
return image['minDisk'] <= flavor['disk']
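
For orientation, a hedged sketch of how a concrete test can drive the lease helpers defined above; the class, method, and lease name are illustrative only:

    class ExampleLeaseLifecycle(ResourceReservationScenarioTest):

        def _wait_out_lease(self, lease_name):
            # Look the lease up by name, wait for its end_lease event to
            # complete, then remove it.
            lease = self.get_lease_by_name(lease_name)['lease']
            self.wait_for_lease_end(lease['id'])
            self.delete_lease(lease['id'])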


@ -1,337 +0,0 @@
# Copyright 2017 NTT Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
from oslo_log import log as logging
from six.moves import range
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions
from blazar_tempest_plugin.tests.scenario import (
resource_reservation_scenario as rrs)
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestHostReservationScenario(rrs.ResourceReservationScenarioTest):
"""A Scenario test class that checks host reservation."""
MAX_RETRY = 20
WAIT_TIME = 2
def setUp(self):
super(TestHostReservationScenario, self).setUp()
self.aggr_client = self.os_admin.aggregates_client
def tearDown(self):
super(TestHostReservationScenario, self).tearDown()
def fetch_one_compute_host(self):
"""Returns a first host listed in nova-compute services."""
compute = next(iter(self.os_admin.services_client.
list_services(binary='nova-compute')['services']))
return compute
def get_lease_body(self, lease_name, host_name):
current_time = datetime.datetime.utcnow()
end_time = current_time + datetime.timedelta(hours=1)
body = {
"start_date": "now",
"end_date": end_time.strftime('%Y-%m-%d %H:%M'),
"name": lease_name,
"events": [],
}
body["reservations"] = [
{
"hypervisor_properties": ('["==", "$hypervisor_hostname", "'
'%s"]' % host_name),
"max": 1,
"min": 1,
"resource_type": 'physical:host',
"resource_properties": ''
}
]
return body
def get_lease_body_missing_param(self, lease_name, host_name):
current_time = datetime.datetime.utcnow()
end_time = current_time + datetime.timedelta(hours=1)
body = {
"start_date": "now",
"end_date": end_time.strftime('%Y-%m-%d %H:%M'),
"name": lease_name,
"events": [],
}
body["reservations"] = [
{
"hypervisor_properties": ('["==", "$hypervisor_hostname", "'
'%s"]' % host_name),
"min": '1',
"resource_type": 'physical:host',
"resource_properties": ''
}
]
return body
def get_invalid_lease_body(self, lease_name, host_name):
current_time = datetime.datetime.utcnow()
end_time = current_time + datetime.timedelta(hours=1)
body = {
"start_date": "now",
"end_date": end_time.strftime('%Y-%m-%d %H:%M'),
"name": lease_name,
"events": [],
}
body["reservations"] = [
{
"hypervisor_properties": ('["==", "$hypervisor_hostname", "'
'%s"]' % host_name),
"max": 'foo',
"min": 'bar',
"resource_type": 'physical:host',
"resource_properties": ''
}
]
return body
def get_expiration_lease_body(self, lease_name, host_name):
current_time = datetime.datetime.utcnow()
end_time = current_time + datetime.timedelta(seconds=90)
body = {
'start_date': "now",
'end_date': end_time.strftime('%Y-%m-%d %H:%M'),
'name': lease_name,
'events': [],
}
body['reservations'] = [
{
'hypervisor_properties': ('["==", "$hypervisor_hostname", "'
'%s"]' % host_name),
'max': 1,
'min': 1,
'resource_type': 'physical:host',
'resource_properties': ''
}
]
return body
def fetch_aggregate_by_name(self, name):
aggregates = self.aggr_client.list_aggregates()['aggregates']
try:
aggr = next(iter(filter(lambda aggr: aggr['name'] == name,
aggregates)))
except StopIteration:
err_msg = "aggregate with name %s doesn't exist." % name
raise exceptions.NotFound(err_msg)
return aggr
def wait_until_aggregated(self, aggregate_name, host_name):
for i in range(self.MAX_RETRY):
try:
aggr = self.fetch_aggregate_by_name(aggregate_name)
self.assertTrue(host_name in aggr['hosts'])
return
except Exception:
pass
time.sleep(self.WAIT_TIME)
err_msg = ("hostname %s doesn't exist in aggregate %s."
% (host_name, aggregate_name))
raise exceptions.NotFound(err_msg)
def _add_host_once(self):
host = self.fetch_one_compute_host()
hosts = self.reservation_client.list_host()['hosts']
try:
next(iter(filter(
lambda h: h['hypervisor_hostname'] == host['host'], hosts)))
except StopIteration:
self.reservation_client.create_host({'name': host['host']})
return host
@decorators.attr(type='smoke')
def test_host_reservation(self):
# Create the host if it doesn't exist
host = self._add_host_once()
# check the host is in freepool
freepool = self.fetch_aggregate_by_name('freepool')
self.assertTrue(host['host'] in freepool['hosts'])
# try creating a new lease with a missing parameter
body = self.get_lease_body_missing_param('scenario-1-missing-param',
host['host'])
self.assertRaises(exceptions.BadRequest,
self.reservation_client.create_lease, body)
# try creating a new lease with an invalid request
body = self.get_invalid_lease_body('scenario-1-invalid', host['host'])
self.assertRaises(exceptions.BadRequest,
self.reservation_client.create_lease, body)
# create new lease and start reservation immediately
body = self.get_lease_body('scenario-1', host['host'])
lease = self.reservation_client.create_lease(body)['lease']
# check host added to the reservation
reservation_id = next(iter(lease['reservations']))['id']
self.wait_until_aggregated(reservation_id, host['host'])
# create an instance with reservation id
create_kwargs = {
'scheduler_hints': {
"reservation": reservation_id,
},
'image_id': CONF.compute.image_ref,
'flavor': CONF.compute.flavor_ref,
}
server = self.create_server(clients=self.os_admin,
**create_kwargs)
# ensure server is located on the requested host
self.assertEqual(host['host'], server['OS-EXT-SRV-ATTR:host'])
# delete the lease, which should trigger termination of the instance
self.reservation_client.delete_lease(lease['id'])
waiters.wait_for_server_termination(self.os_admin.servers_client,
server['id'])
# create an instance without reservation id, which is expected to fail
create_kwargs = {
'image_id': CONF.compute.image_ref,
'flavor': CONF.compute.flavor_ref,
}
server = self.create_server(clients=self.os_admin,
wait_until=None,
**create_kwargs)
waiters.wait_for_server_status(self.os_admin.servers_client,
server['id'], 'ERROR',
raise_on_error=False)
@decorators.attr(type='smoke')
def test_lease_expiration(self):
# create the host if it doesn't exist
host = self._add_host_once()
# create new lease and start reservation immediately
body = self.get_expiration_lease_body('scenario-2-expiration',
host['host'])
lease = self.reservation_client.create_lease(body)['lease']
lease_id = lease['id']
# check host added to the reservation
reservation_id = next(iter(lease['reservations']))['id']
self.wait_until_aggregated(reservation_id, host['host'])
create_kwargs = {
'scheduler_hints': {
'reservation': reservation_id,
},
'image_id': CONF.compute.image_ref,
'flavor': CONF.compute.flavor_ref,
}
server = self.create_server(clients=self.os_admin,
**create_kwargs)
# wait for lease end
self.wait_for_lease_end(lease_id)
# check if the lease has been correctly terminated and
# the instance is removed
waiters.wait_for_server_termination(self.os_admin.servers_client,
server['id'])
# check that the host aggregate was deleted
self.assertRaises(exceptions.NotFound,
self.fetch_aggregate_by_name, reservation_id)
# check that the host is back in the freepool
freepool = self.fetch_aggregate_by_name('freepool')
self.assertTrue(host['host'] in freepool['hosts'])
# check the reservation status
lease = self.reservation_client.get_lease(lease_id)['lease']
self.assertTrue('deleted' in
next(iter(lease['reservations']))['status'])
@decorators.attr(type='smoke')
def test_update_host_reservation(self):
# create the host if it doesn't exist
host = self._add_host_once()
# create new lease and start reservation immediately
body = self.get_lease_body('scenario-3-update', host['host'])
lease = self.reservation_client.create_lease(body)['lease']
lease_id = lease['id']
# check host added to the reservation
reservation_id = next(iter(lease['reservations']))['id']
self.wait_until_aggregated(reservation_id, host['host'])
# check the host aggregate for blazar
self.fetch_aggregate_by_name(reservation_id)
# create an instance with reservation id
create_kwargs = {
'scheduler_hints': {
'reservation': reservation_id,
},
'image_id': CONF.compute.image_ref,
'flavor': CONF.compute.flavor_ref,
}
server = self.create_server(clients=self.os_admin,
wait_until=None,
**create_kwargs)
waiters.wait_for_server_status(self.os_admin.servers_client,
server['id'], 'ACTIVE')
# wait enough time for the update API to succeed
time.sleep(75)
# update the lease end_time
end_time = datetime.datetime.utcnow()
body = {
'end_date': end_time.strftime('%Y-%m-%d %H:%M')
}
self.reservation_client.update_lease(lease_id,
body)['lease']
# check if the lease has been correctly terminated and
# the instance is removed
waiters.wait_for_server_termination(self.os_admin.servers_client,
server['id'])
# check that the host aggregate was deleted
self.assertRaises(exceptions.NotFound,
self.fetch_aggregate_by_name, reservation_id)
# check that the host is back in the freepool
freepool = self.fetch_aggregate_by_name('freepool')
self.assertTrue(host['host'] in freepool['hosts'])
# check the reservation status
lease = self.reservation_client.get_lease(lease_id)['lease']
self.assertTrue('deleted' in
next(iter(lease['reservations']))['status'])
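
For reference, the kind of request body that get_lease_body() assembles for a host reservation; the hostname and dates below are placeholders, while the field names mirror the helper above:

    example_lease_request = {
        "name": "scenario-1",
        "start_date": "now",
        "end_date": "2018-01-04 18:29",
        "events": [],
        "reservations": [{
            "hypervisor_properties":
                '["==", "$hypervisor_hostname", "compute-1"]',
            "max": 1,
            "min": 1,
            "resource_type": "physical:host",
            "resource_properties": "",
        }],
    }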

View File

@ -1,182 +0,0 @@
# Copyright 2014 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import dateutil.parser
from oslo_log import log as logging
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib import decorators
from tempest import test
from blazar_tempest_plugin.tests.scenario import (
resource_reservation_scenario as rrs)
CONF = config.CONF
LOG = logging.getLogger(__name__)
# same as the one at blazar/manager/service
LEASE_DATE_FORMAT = "%Y-%m-%d %H:%M"
LEASE_MIN_DURATION = 2
# TODO(cmart): LEASE_IMAGE_PREFIX should be extracted from CONF
LEASE_IMAGE_PREFIX = 'reserved_'
class TestInstanceReservationScenario(rrs.ResourceReservationScenarioTest):
"""Test that checks the instance reservation scenario.
The following is the scenario outline:
1) Create an instance with the hint parameters
2) check vm was shelved
3) check vm became active
4) check that a new lease is created on blazar
5) check its param
6) wait lease end
7) make sure the VM was snapshotted and removed
"""
def setUp(self):
super(TestInstanceReservationScenario, self).setUp()
# Set up image and flavor for the test instance
# Support both configured and injected values
if not hasattr(self, 'image_ref'):
self.image_ref = CONF.compute.image_ref
if not hasattr(self, 'flavor_ref'):
self.flavor_ref = CONF.compute.flavor_ref
if not self.is_flavor_enough(self.flavor_ref, self.image_ref):
raise self.skipException(
'{image} does not fit in {flavor}'.format(
image=self.image_ref, flavor=self.flavor_ref
)
)
def tearDown(self):
super(TestInstanceReservationScenario, self).tearDown()
def add_keypair(self):
self.keypair = self.create_keypair()
def boot_server_with_lease_data(self, lease_data, wait):
self.add_keypair()
# Create server with lease_data
create_kwargs = {
'key_name': self.keypair['name'],
'scheduler_hints': lease_data
}
server = self.create_server(image_id=self.image_ref,
flavor=self.flavor_ref,
wait_until=wait,
**create_kwargs)
self.server_id = server['id']
self.server_name = server['name']
def check_lease_creation(self, expected_lease_data):
server = self.servers_client.show_server(self.server_id)['server']
expected_lease_params = json.loads(expected_lease_data['lease_params'])
# compare lease_data with data passed as parameter
lease = self.get_lease_by_name(expected_lease_params['name'])
# check lease dates!! (Beware of date format)
lease_start_date = dateutil.parser.parse(lease['start_date'])
lease_start_date = lease_start_date.strftime(LEASE_DATE_FORMAT)
lease_end_date = dateutil.parser.parse(lease['end_date'])
lease_end_date = lease_end_date.strftime(LEASE_DATE_FORMAT)
self.assertEqual(expected_lease_params['start'], lease_start_date)
self.assertEqual(expected_lease_params['end'], lease_end_date)
# check lease events!
events = lease['events']
self.assertTrue(len(events) >= 3)
self.assertFalse(
len(filter(lambda evt: evt['event_type'] != 'start_lease' and
evt['event_type'] != 'end_lease' and
evt['event_type'] != 'before_end_lease',
events)) > 0)
# check that only one reservation was made and it is for a vm
# compare the resource id from the lease with the server.id attribute!
reservations = lease['reservations']
self.assertTrue(len(reservations) == 1)
self.assertEqual(server['id'], reservations[0]['resource_id'])
self.assertEqual("virtual:instance",
lease['reservations'][0]['resource_type'])
def check_server_is_snapshoted(self):
image_name = LEASE_IMAGE_PREFIX + self.server_name
try:
images_list = self.image_client.list()
self.assertNotEmpty(
filter(lambda image: image.name == image_name, images_list))
except Exception as e:
message = ("Unable to find image with name '%s'. "
"Exception: %s" % (image_name, e.message))
raise exceptions.NotFound(message)
def check_server_status(self, expected_status):
server = self.servers_client.show_server(self.server_id)['server']
self.assertEqual(expected_status, server['status'])
# TODO(cmart): add blazar to services after pushing this code into tempest
@decorators.skip_because('Instance reservation is not supported yet.',
bug='1659200')
@decorators.attr(type='slow')
@test.services('compute', 'network')
def test_server_basic_resource_reservation_operation(self):
start_date = datetime.datetime.utcnow() + datetime.timedelta(minutes=1)
end_date = start_date + datetime.timedelta(minutes=LEASE_MIN_DURATION)
start_date = start_date.strftime(LEASE_DATE_FORMAT)
end_date = end_date.strftime(LEASE_DATE_FORMAT)
lease_name = 'scenario_test'
lease_data = {
'lease_params': '{"name": "%s",'
'"start": "%s",'
'"end": "%s"}'
% (lease_name, start_date, end_date)}
# boot the server and don't wait until it is active
self.boot_server_with_lease_data(lease_data, wait=False)
self.check_server_status('SHELVED_OFFLOADED')
# now, wait until the server is active
waiters.wait_for_server_status(self.servers_client,
self.server_id, 'ACTIVE')
self.check_lease_creation(lease_data)
# wait for lease end
created_lease = self.get_lease_by_name(lease_name)
self.wait_for_lease_end(created_lease['id'])
# check server final status
self.check_server_is_snapshoted()
waiters.wait_for_server_termination(self.servers_client,
self.server_id)
# remove created snapshot
image_name = LEASE_IMAGE_PREFIX + self.server_name
self.remove_image_snapshot(image_name)
# remove created lease
self.delete_lease(created_lease['id'])


@ -1,44 +0,0 @@
# Copyright 2017 University of Chicago. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from multiprocessing.pool import ThreadPool
from blazar_tempest_plugin.tests.scenario import (
resource_reservation_scenario as rrs)
class TestReservationConcurrencyScenario(rrs.ResourceReservationScenarioTest):
"""A Scenario test class checking Blazar handles concurrent requests."""
MAX_CONCURRENCY = 10
def setUp(self):
super(TestReservationConcurrencyScenario, self).setUp()
def tearDown(self):
super(TestReservationConcurrencyScenario, self).tearDown()
def test_concurrent_list_lease(self):
# run lease-list requests in parallel to check service concurrency
results = []
pool = ThreadPool(self.MAX_CONCURRENCY)
for i in range(0, self.MAX_CONCURRENCY):
results.append(
pool.apply_async(self.reservation_client.list_lease, ()))
pool.close()
pool.join()
results = [r.get() for r in results]
for r in results:
self.assertEqual('200', r.response['status'])


@ -30,6 +30,8 @@
 cat << 'EOF' >>"/tmp/dg-local.conf"
 [[local|localrc]]
 enable_plugin blazar git://git.openstack.org/openstack/blazar
+# Enable Tempest plugin
+TEMPEST_PLUGINS='/opt/stack/new/blazar-tempest-plugin'
 EOF
 executable: /bin/bash
 chdir: '{{ ansible_user_dir }}/workspace'
@ -45,8 +47,8 @@
export PROJECTS="openstack/blazar $PROJECTS" export PROJECTS="openstack/blazar $PROJECTS"
export PROJECTS="openstack/blazar-nova $PROJECTS" export PROJECTS="openstack/blazar-nova $PROJECTS"
export PROJECTS="openstack/python-blazarclient $PROJECTS" export PROJECTS="openstack/python-blazarclient $PROJECTS"
export DEVSTACK_GATE_TEMPEST_ALL_PLUGINS=1 export PROJECTS="openstack/blazar-tempest-plugin $PROJECTS"
export DEVSTACK_GATE_TEMPEST_REGEX="blazar_tempest_plugin.tests" export DEVSTACK_GATE_TEMPEST_REGEX="blazar_tempest_plugin"
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash executable: /bin/bash


@ -24,7 +24,6 @@ setup-hooks = pbr.hooks.setup_hook
 [files]
 packages =
     blazar
-    blazar_tempest_plugin

 [entry_points]
 console_scripts =
@ -59,9 +58,6 @@ oslo.config.opts =
 wsgi_scripts =
     blazar-api-wsgi = blazar.api.wsgi_app:init_app

-tempest.test_plugins =
-    blazar_tests = blazar_tempest_plugin.plugin:BlazarTempestPlugin
-
 [build_sphinx]
 all_files = 1
 build-dir = doc/build