Remove integration and configuration tests

This patch proposes to use the heat tempest plugin for integration tests
(with no co-gating potential) and configuration tests.
Also remove test_autoscaling_lb from the tempest plugin,
since it is no longer being used.
Remove the scenario base, since we have now moved all scenario tests to the plugin.

Change-Id: Ice6a0e1fe8ce2e1de5253c674d11949b0f8a6e31
This commit is contained in:
ricolin 2017-12-22 13:38:52 +08:00 committed by Zane Bitter
parent ba86129965
commit aed1e6f368
29 changed files with 6 additions and 2838 deletions

View File

@ -96,32 +96,9 @@ HeatGroup = [
cfg.StrOpt('floating_network_name',
default='public',
help="Visible floating network name "),
cfg.StrOpt('boot_config_env',
default=('heat_integrationtests/scenario/templates'
'/boot_config_none_env.yaml'),
help="Path to environment file which defines the "
"resource type Heat::InstallConfigAgent. Needs to "
"be appropriate for the image_ref."),
cfg.StrOpt('fixed_subnet_name',
default='heat-subnet',
help="Visible fixed sub-network name "),
cfg.IntOpt('ssh_timeout',
default=300,
help="Timeout in seconds to wait for authentication to "
"succeed."),
cfg.IntOpt('ip_version_for_ssh',
default=4,
help="IP version used for SSH connections."),
cfg.IntOpt('ssh_channel_timeout',
default=60,
help="Timeout in seconds to wait for output from ssh "
"channel."),
cfg.IntOpt('tenant_network_mask_bits',
default=28,
help="The mask bits for tenant ipv4 subnets"),
cfg.BoolOpt('skip_scenario_tests',
default=False,
help="Skip all scenario tests"),
cfg.BoolOpt('skip_functional_tests',
default=False,
help="Skip all functional tests"),
@ -129,10 +106,6 @@ HeatGroup = [
help="List of functional test class or class.method "
"names to skip ex. AutoscalingGroupTest, "
"InstanceGroupBasicTest.test_size_updates_work"),
cfg.ListOpt('skip_scenario_test_list',
help="List of scenario test class or class.method "
"names to skip ex. NeutronLoadBalancerTest, "
"AodhAlarmTest.test_alarm"),
cfg.ListOpt('skip_test_stack_action_list',
help="List of stack actions in tests to skip "
"ex. ABANDON, ADOPT, SUSPEND, RESUME"),
@ -140,9 +113,6 @@ HeatGroup = [
default=True,
help="Test features that are only present for stacks with "
"convergence enabled."),
cfg.IntOpt('volume_size',
default=1,
help='Default size in GB for volumes created by volumes tests'),
cfg.IntOpt('connectivity_timeout',
default=120,
help="Timeout in seconds to wait for connectivity to "

View File

@ -1,202 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import select
import socket
import time
from oslo_log import log as logging
import paramiko
import six
from heat_integrationtests.common import exceptions
LOG = logging.getLogger(__name__)
class Client(object):
    """SSH client that retries connection establishment until a deadline.

    Thin wrapper around paramiko that repeatedly attempts to connect to
    ``host`` (with linearly growing back-off) and exposes helpers to run a
    single command or merely verify that authentication works.
    """

    def __init__(self, host, username, password=None, timeout=300, pkey=None,
                 channel_timeout=10, look_for_keys=False, key_filename=None):
        # :param host: address to connect to.
        # :param username/password: login credentials.
        # :param timeout: overall deadline (seconds) for establishing an
        #     authenticated connection (see _is_timed_out).
        # :param pkey: private key, either a paramiko key object or the raw
        #     key text (parsed below).
        # :param channel_timeout: per-operation socket/channel timeout.
        self.host = host
        self.username = username
        self.password = password
        if isinstance(pkey, six.string_types):
            # Accept key material as text and parse it into an RSA key.
            pkey = paramiko.RSAKey.from_private_key(
                six.moves.cStringIO(str(pkey)))
        self.pkey = pkey
        self.look_for_keys = look_for_keys
        self.key_filename = key_filename
        self.timeout = int(timeout)
        self.channel_timeout = float(channel_timeout)
        self.buf_size = 1024  # chunk size for stdout/stderr reads

    def _get_ssh_connection(self, sleep=1.5, backoff=1):
        """Returns an ssh connection to the specified host."""
        bsleep = sleep
        ssh = paramiko.SSHClient()
        # Test servers are throwaway, so accept unknown host keys.
        ssh.set_missing_host_key_policy(
            paramiko.AutoAddPolicy())
        _start_time = time.time()
        if self.pkey is not None:
            LOG.info("Creating ssh connection to '%s' as '%s'"
                     " with public key authentication",
                     self.host, self.username)
        else:
            LOG.info("Creating ssh connection to '%s' as '%s'"
                     " with password %s",
                     self.host, self.username, str(self.password))
        attempts = 0
        while True:
            try:
                ssh.connect(self.host, username=self.username,
                            password=self.password,
                            look_for_keys=self.look_for_keys,
                            key_filename=self.key_filename,
                            timeout=self.channel_timeout, pkey=self.pkey)
                LOG.info("ssh connection to %s@%s successfuly created",
                         self.username, self.host)
                return ssh
            except (socket.error,
                    paramiko.SSHException) as e:
                # Keep retrying until self.timeout has elapsed, then raise.
                if self._is_timed_out(_start_time):
                    LOG.exception("Failed to establish authenticated ssh"
                                  " connection to %s@%s after %d attempts",
                                  self.username, self.host, attempts)
                    raise exceptions.SSHTimeout(host=self.host,
                                                user=self.username,
                                                password=self.password)
                # Linear back-off: wait a bit longer after each failure.
                bsleep += backoff
                attempts += 1
                LOG.warning("Failed to establish authenticated ssh"
                            " connection to %s@%s (%s). Number attempts: %s."
                            " Retry after %d seconds.",
                            self.username, self.host, e, attempts, bsleep)
                time.sleep(bsleep)

    def _is_timed_out(self, start_time):
        # True once more than self.timeout seconds have passed since
        # start_time (algebraically: time.time() > start_time + timeout).
        return (time.time() - self.timeout) > start_time

    def exec_command(self, cmd):
        """Execute the specified command on the server.

        Note that this method is reading whole command outputs to memory, thus
        shouldn't be used for large outputs.

        :returns: data read from standard output of the command.
        :raises: SSHExecCommandFailed if command returns nonzero
                 status. The exception contains command status stderr content.
        """
        ssh = self._get_ssh_connection()
        transport = ssh.get_transport()
        channel = transport.open_session()
        channel.fileno()  # Register event pipe
        channel.exec_command(cmd)
        # No stdin will be sent; let the remote side see EOF immediately.
        channel.shutdown_write()
        out_data = []
        err_data = []
        poll = select.poll()
        poll.register(channel, select.POLLIN)
        start_time = time.time()
        while True:
            # Wait up to channel_timeout ms for data, but enforce the
            # overall self.timeout deadline across the whole loop.
            ready = poll.poll(self.channel_timeout)
            if not any(ready):
                if not self._is_timed_out(start_time):
                    continue
                raise exceptions.TimeoutException(
                    "Command: '{0}' executed on host '{1}'.".format(
                        cmd, self.host))
            if not ready[0]:  # If there is nothing to read.
                continue
            out_chunk = err_chunk = None
            if channel.recv_ready():
                out_chunk = channel.recv(self.buf_size)
                out_data += out_chunk,
            if channel.recv_stderr_ready():
                err_chunk = channel.recv_stderr(self.buf_size)
                err_data += err_chunk,
            # Stop once the channel is closed and both streams are drained.
            if channel.closed and not err_chunk and not out_chunk:
                break
        exit_status = channel.recv_exit_status()
        if 0 != exit_status:
            raise exceptions.SSHExecCommandFailed(
                command=cmd, exit_status=exit_status,
                strerror=''.join(err_data))
        return ''.join(out_data)

    def test_connection_auth(self):
        """Raises an exception when we can not connect to server via ssh."""
        connection = self._get_ssh_connection()
        connection.close()
class RemoteClient(object):
    """Convenience wrapper around Client bound to one server/guest.

    Resolves the SSH address from either a plain IP string or a nova
    server dict and exposes small helpers used by scenario checks.
    """

    # NOTE(afazekas): It should always get an address instead of server
    def __init__(self, server, username, password=None, pkey=None,
                 conf=None):
        self.conf = conf
        timeout = self.conf.ssh_timeout
        net_name = self.conf.network_for_ssh
        wanted_version = self.conf.ip_version_for_ssh
        chan_timeout = self.conf.ssh_channel_timeout

        if isinstance(server, six.string_types):
            addr = server
        else:
            # Pick the first address on the configured network that
            # matches the configured IP version.
            candidates = (a['addr']
                          for a in server['addresses'][net_name]
                          if a['version'] == wanted_version)
            addr = next(candidates, None)
            if addr is None:
                raise exceptions.ServerUnreachable()

        self.ssh_client = Client(addr, username, password,
                                 timeout, pkey=pkey,
                                 channel_timeout=chan_timeout)

    def exec_command(self, cmd):
        """Run *cmd* on the remote host and return its stdout."""
        return self.ssh_client.exec_command(cmd)

    def validate_authentication(self):
        """Validate ssh connection and authentication.

        This method raises an Exception when the validation fails.
        """
        self.ssh_client.test_connection_auth()

    def get_partitions(self):
        """Return the contents of /proc/partitions."""
        return self.exec_command('cat /proc/partitions')

    def get_boot_time(self):
        """Return the guest's boot time as a struct_time."""
        boot_secs = self.exec_command('cut -f1 -d. /proc/uptime')
        return time.localtime(time.time() - int(boot_secs))

    def write_to_console(self, message):
        """Echo *message* to the guest console (usually /dev/ttyS0)."""
        # Escape $, backslash and backtick before embedding in the shell.
        message = re.sub("([$\\`])", "\\\\\\\\\\1", message)
        cmd = 'sudo sh -c "echo \\"%s\\" >/dev/console"' % message
        return self.exec_command(cmd)

    def ping_host(self, host):
        """Send a single ping to *host* and return the command output."""
        return self.exec_command('ping -c1 -w1 %s' % host)

    def get_ip_list(self):
        """Return the guest's interface/address listing."""
        return self.exec_command("/bin/ip address")

View File

@ -19,7 +19,6 @@ import time
import fixtures
from heatclient import exc as heat_exceptions
from keystoneauth1 import exceptions as kc_exceptions
from neutronclient.common import exceptions as network_exceptions
from oslo_log import log as logging
from oslo_utils import timeutils
import six
@ -30,7 +29,6 @@ import testtools
from heat_integrationtests.common import clients
from heat_integrationtests.common import config
from heat_integrationtests.common import exceptions
from heat_integrationtests.common import remote_client
LOG = logging.getLogger(__name__)
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
@ -116,25 +114,6 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
def setup_clients_for_admin(self):
self.setup_clients(self.conf, True)
def get_remote_client(self, server_or_ip, username, private_key=None):
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
network_name_for_ssh = self.conf.network_for_ssh
ip = server_or_ip.networks[network_name_for_ssh][0]
if private_key is None:
private_key = self.keypair.private_key
linux_client = remote_client.RemoteClient(ip, username,
pkey=private_key,
conf=self.conf)
try:
linux_client.validate_authentication()
except exceptions.SSHTimeout:
LOG.exception('ssh connection to %s failed', ip)
raise
return linux_client
def check_connectivity(self, check_ip):
def try_connect(ip):
try:
@ -199,13 +178,6 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
if net['name'] == net_name:
return net
def is_network_extension_supported(self, extension_alias):
try:
self.network_client.show_extension(extension_alias)
except network_exceptions.NeutronClientException:
return False
return True
def is_service_available(self, service_type):
try:
self.identity_client.get_endpoint_url(

View File

@ -1,101 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
# Minimal topology for UpdatePortTest: a network, a DHCP-less subnet, a
# port with a fixed IP and a parameterized MAC address, and a TestResource
# that depends on the port. Tests rewrite the 'ip_address: 11.11.11.11'
# line via str.replace, so that exact text must be kept.
# (Fixed typo in the description: "wit" -> "with".)
test_template = '''
heat_template_version: 2015-04-30
description: Test template to create port with ip_address.
parameters:
  mac:
    type: string
    default: 00-00-00-00-BB-BB
resources:
  net:
    type: OS::Neutron::Net
  subnet:
    type: OS::Neutron::Subnet
    properties:
      enable_dhcp: false
      network: { get_resource: net }
      cidr: 11.11.11.0/24
  port:
    type: OS::Neutron::Port
    properties:
      network: {get_resource: net}
      mac_address: {get_param: mac}
      fixed_ips:
        - subnet: {get_resource: subnet}
          ip_address: 11.11.11.11
  test:
    depends_on: port
    type: OS::Heat::TestResource
    properties:
      value: Test1
      fail: False
outputs:
  port_ip:
    value: {get_attr: [port, fixed_ips, 0, ip_address]}
  mac_address:
    value: {get_attr: [port, mac_address]}
'''
class UpdatePortTest(functional_base.FunctionalTestsBase):
    """In-place update scenarios for OS::Neutron::Port."""

    def get_port_id_and_outputs(self, stack_identifier):
        """Return (port physical id, 'port_ip' output, 'mac_address' output)."""
        matching = [res.physical_resource_id
                    for res in self.client.resources.list(stack_identifier)
                    if res.resource_name == 'port']
        stack = self.client.stacks.get(stack_identifier)
        return (matching[0],
                self._stack_output(stack, 'port_ip'),
                self._stack_output(stack, 'mac_address'))

    def test_update_remove_ip(self):
        """Dropping the fixed ip_address must update the port in place."""
        stack_identifier = self.stack_create(template=test_template)
        old_id, _, old_mac = self.get_port_id_and_outputs(stack_identifier)

        # Remove the ip_address property and update the stack.
        self.update_stack(
            stack_identifier,
            test_template.replace('ip_address: 11.11.11.11', ''))

        new_id, _, new_mac = self.get_port_id_and_outputs(stack_identifier)
        # Same physical port (updated, not replaced) with the same MAC.
        self.assertEqual(old_id, new_id)
        self.assertEqual(old_mac, new_mac)

    def test_update_with_mac_address(self):
        """Changing the mac parameter must keep the port and its IP."""
        if not self.conf.admin_username or not self.conf.admin_password:
            self.skipTest('No admin creds found, skipping')
        # Updating mac_address requires admin credentials.
        self.setup_clients_for_admin()

        stack_identifier = self.stack_create(template=test_template)
        old_id, old_ip, old_mac = self.get_port_id_and_outputs(
            stack_identifier)

        # Update with another 'mac' parameter value.
        self.update_stack(stack_identifier, test_template,
                          parameters={'mac': '00-00-00-00-AA-AA'})

        new_id, new_ip, new_mac = self.get_port_id_and_outputs(
            stack_identifier)
        # Port and IP survive; only the MAC changes.
        self.assertEqual(old_id, new_id)
        self.assertEqual(old_ip, new_ip)
        self.assertNotEqual(old_mac, new_mac)

View File

@ -1,127 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
test_template = '''
heat_template_version: 2015-04-30
description: Test template to create/update subnet with allocation_pools.
resources:
net:
type: OS::Neutron::Net
subnet:
type: OS::Neutron::Subnet
properties:
network: { get_resource: net }
cidr: 11.11.11.0/24
gateway_ip: 11.11.11.5
allocation_pools: [{start: 11.11.11.10, end: 11.11.11.250}]
outputs:
alloc_pools:
value: {get_attr: [subnet, allocation_pools]}
gateway_ip:
value: {get_attr: [subnet, gateway_ip]}
'''
class UpdateSubnetTest(functional_base.FunctionalTestsBase):
    """Update scenarios for subnet allocation_pools and gateway_ip."""

    # Template fragments the tests substitute via str.replace().
    _POOLS = 'allocation_pools: [{start: 11.11.11.10, end: 11.11.11.250}]'
    _GATEWAY = 'gateway_ip: 11.11.11.5'

    def get_outputs(self, stack_identifier, output_key):
        """Return the value of a single stack output."""
        stack = self.client.stacks.get(stack_identifier)
        return self._stack_output(stack, output_key)

    def _stack_with_pools(self):
        # Create the stack and sanity-check the initial allocation pools.
        sid = self.stack_create(template=test_template)
        self.assertEqual([{'start': '11.11.11.10', 'end': '11.11.11.250'}],
                         self.get_outputs(sid, 'alloc_pools'))
        return sid

    def _stack_with_gateway(self):
        # Create the stack and sanity-check the initial gateway address.
        sid = self.stack_create(template=test_template)
        self.assertEqual('11.11.11.5', self.get_outputs(sid, 'gateway_ip'))
        return sid

    def test_update_allocation_pools(self):
        """A new pool range must replace the old one."""
        sid = self._stack_with_pools()
        new_pools = ('allocation_pools: '
                     '[{start: 11.11.11.10, end: 11.11.11.100}]')
        self.update_stack(sid, test_template.replace(self._POOLS, new_pools))
        self.assertEqual([{'start': '11.11.11.10', 'end': '11.11.11.100'}],
                         self.get_outputs(sid, 'alloc_pools'))

    def test_update_allocation_pools_to_empty(self):
        """Updating with an explicit empty list must clear the pools."""
        sid = self._stack_with_pools()
        self.update_stack(
            sid, test_template.replace(self._POOLS, 'allocation_pools: []'))
        self.assertEqual([], self.get_outputs(sid, 'alloc_pools'))

    def test_update_to_no_allocation_pools(self):
        """Removing the property entirely must also clear the pools."""
        sid = self._stack_with_pools()
        self.update_stack(sid, test_template.replace(self._POOLS, ''))
        self.assertEqual([], self.get_outputs(sid, 'alloc_pools'))

    def test_update_gateway_ip(self):
        """A new gateway_ip must replace the old one."""
        sid = self._stack_with_gateway()
        self.update_stack(sid, test_template.replace(
            self._GATEWAY, 'gateway_ip: 11.11.11.9'))
        self.assertEqual('11.11.11.9', self.get_outputs(sid, 'gateway_ip'))

    def test_update_gateway_ip_to_empty(self):
        """Updating gateway_ip to null (resolves to '') must clear it."""
        sid = self._stack_with_gateway()
        self.update_stack(sid, test_template.replace(
            self._GATEWAY, 'gateway_ip: null'))
        self.assertIsNone(self.get_outputs(sid, 'gateway_ip'))

    def test_update_to_no_gateway_ip(self):
        """Removing gateway_ip from the template must clear it."""
        sid = self._stack_with_gateway()
        self.update_stack(sid, test_template.replace(self._GATEWAY, ''))
        self.assertIsNone(self.get_outputs(sid, 'gateway_ip'))

View File

@ -1,275 +0,0 @@
# Copyright (c) 2017 Ericsson.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import yaml
from heat_integrationtests.functional import functional_base
test_template = '''
heat_template_version: pike
description: Test template to create, update, delete trunk.
resources:
parent_net:
type: OS::Neutron::Net
trunk_net_one:
type: OS::Neutron::Net
trunk_net_two:
type: OS::Neutron::Net
parent_subnet:
type: OS::Neutron::Subnet
properties:
network: { get_resource: parent_net }
cidr: 10.0.0.0/16
trunk_subnet_one:
type: OS::Neutron::Subnet
properties:
network: { get_resource: trunk_net_one }
cidr: 10.10.0.0/16
trunk_subnet_two:
type: OS::Neutron::Subnet
properties:
network: { get_resource: trunk_net_two }
cidr: 10.20.0.0/16
parent_port:
type: OS::Neutron::Port
properties:
network: { get_resource: parent_net }
name: trunk_parent_port
sub_port_one:
type: OS::Neutron::Port
properties:
network: { get_resource: trunk_net_one }
name: trunk_sub_port_one
sub_port_two:
type: OS::Neutron::Port
properties:
network: { get_resource: trunk_net_two }
name: trunk_sub_port_two
trunk:
type: OS::Neutron::Trunk
properties:
name: test_trunk
port: { get_resource: parent_port }
sub_ports:
outputs:
trunk_parent_port:
value: { get_attr: [trunk, port_id] }
'''
class UpdateTrunkTest(functional_base.FunctionalTestsBase):
    """Add/remove/update sub ports on an OS::Neutron::Trunk via stack update.

    Each test parses the module-level template, mutates the trunk's
    sub_ports property in the parsed dict, re-dumps it and runs a stack
    update, then compares Heat's view with Neutron's trunk_details.
    """

    @staticmethod
    def _sub_ports_dict_to_set(sub_ports):
        """Normalize a sub_ports list to a set of frozensets for comparison."""
        new_sub_ports = copy.deepcopy(sub_ports)

        # NOTE(lajos katona): In the template we have to give the sub port as
        # port, but from trunk_details we receive back them with port_id.
        # As an extra trunk_details contains the mac_address as well which is
        # useless here.
        # So here we have to make sure that the dictionary (input from
        # template or output from trunk_details) have the same keys:
        if any('mac_address' in d for d in new_sub_ports):
            for sp in new_sub_ports:
                sp['port'] = sp['port_id']
                del sp['port_id']
                del sp['mac_address']

        # NOTE(lajos katona): We receive lists (trunk_details['sub_ports'] and
        # the input to the template) and we can't be sure that the order is the
        # same, so by using sets we can compare them.
        sub_ports_set = {frozenset(d.items()) for d in new_sub_ports}
        return sub_ports_set

    def test_add_first_sub_port(self):
        """Update an empty trunk to carry one sub port."""
        stack_identifier = self.stack_create(template=test_template)

        parsed_template = yaml.safe_load(test_template)
        new_sub_port = [{'port': {'get_resource': 'sub_port_one'},
                         'segmentation_id': 10,
                         'segmentation_type': 'vlan'}]
        parsed_template['resources']['trunk']['properties'][
            'sub_ports'] = new_sub_port
        updated_template = yaml.safe_dump(parsed_template)
        self.update_stack(stack_identifier, updated_template)

        # Fix the port_id in the template for assertion
        new_sub_port[0]['port'] = self.get_physical_resource_id(
            stack_identifier, 'sub_port_one')
        parent_id = self.get_stack_output(
            stack_identifier, 'trunk_parent_port')
        parent_port = self.network_client.show_port(parent_id)['port']
        trunk_sub_port = parent_port['trunk_details']['sub_ports']

        self.assertEqual(self._sub_ports_dict_to_set(new_sub_port),
                         self._sub_ports_dict_to_set(trunk_sub_port))

    def test_add_a_second_sub_port(self):
        """Append a second sub port to a trunk created with one."""
        parsed_template = yaml.safe_load(test_template)
        sub_ports = [{'port': {'get_resource': 'sub_port_one'},
                      'segmentation_type': 'vlan',
                      'segmentation_id': 10}, ]
        parsed_template['resources']['trunk']['properties'][
            'sub_ports'] = sub_ports
        template_with_sub_ports = yaml.safe_dump(parsed_template)
        stack_identifier = self.stack_create(template=template_with_sub_ports)

        new_sub_port = {'port': {'get_resource': 'sub_port_two'},
                        'segmentation_id': 20,
                        'segmentation_type': 'vlan'}
        parsed_template['resources']['trunk']['properties'][
            'sub_ports'].append(new_sub_port)
        updated_template = yaml.safe_dump(parsed_template)
        self.update_stack(stack_identifier, updated_template)

        # Fix the port_ids in the templates for assertion
        sub_ports[0]['port'] = self.get_physical_resource_id(
            stack_identifier, 'sub_port_one')
        new_sub_port['port'] = self.get_physical_resource_id(
            stack_identifier, 'sub_port_two')
        expected_sub_ports = [sub_ports[0], new_sub_port]

        parent_id = self.get_stack_output(
            stack_identifier, 'trunk_parent_port')
        parent_port = self.network_client.show_port(parent_id)['port']
        trunk_sub_ports = parent_port['trunk_details']['sub_ports']

        self.assertEqual(self._sub_ports_dict_to_set(expected_sub_ports),
                         self._sub_ports_dict_to_set(trunk_sub_ports))

    def test_remove_sub_port_from_trunk(self):
        """Remove one of two sub ports; the other must remain attached."""
        sub_ports = [{'port': {'get_resource': 'sub_port_one'},
                      'segmentation_type': 'vlan',
                      'segmentation_id': 10},
                     {'port': {'get_resource': 'sub_port_two'},
                      'segmentation_type': 'vlan',
                      'segmentation_id': 20}]
        parsed_template = yaml.safe_load(test_template)
        parsed_template['resources']['trunk']['properties'][
            'sub_ports'] = sub_ports
        template_with_sub_ports = yaml.safe_dump(parsed_template)
        stack_identifier = self.stack_create(template=template_with_sub_ports)

        sub_port_to_be_removed = {'port': {'get_resource': 'sub_port_two'},
                                  'segmentation_type': 'vlan',
                                  'segmentation_id': 20}
        parsed_template['resources']['trunk'][
            'properties']['sub_ports'].remove(sub_port_to_be_removed)
        updated_template = yaml.safe_dump(parsed_template)
        self.update_stack(stack_identifier, updated_template)

        # Fix the port_ids in the templates for assertion
        sub_ports[0]['port'] = self.get_physical_resource_id(
            stack_identifier, 'sub_port_one')
        expected_sub_ports = [sub_ports[0]]

        parent_id = self.get_stack_output(
            stack_identifier, 'trunk_parent_port')
        parent_port = self.network_client.show_port(parent_id)['port']
        trunk_sub_ports = parent_port['trunk_details']['sub_ports']

        self.assertEqual(self._sub_ports_dict_to_set(expected_sub_ports),
                         self._sub_ports_dict_to_set(trunk_sub_ports))

    def test_remove_last_sub_port_from_trunk(self):
        """Empty the sub_ports list; trunk_details must report none left."""
        sub_ports = [{'port': {'get_resource': 'sub_port_one'},
                      'segmentation_type': 'vlan',
                      'segmentation_id': 10}]
        parsed_template = yaml.safe_load(test_template)
        parsed_template['resources']['trunk']['properties'][
            'sub_ports'] = sub_ports
        template_with_sub_ports = yaml.safe_dump(parsed_template)
        stack_identifier = self.stack_create(template=template_with_sub_ports)

        sub_port_to_be_removed = {'port': {'get_resource': 'sub_port_one'},
                                  'segmentation_type': 'vlan',
                                  'segmentation_id': 10}
        parsed_template['resources']['trunk'][
            'properties']['sub_ports'] = []
        updated_template = yaml.safe_dump(parsed_template)
        self.update_stack(stack_identifier, updated_template)

        sub_port_to_be_removed['port'] = self.get_physical_resource_id(
            stack_identifier, 'sub_port_one')
        parent_id = self.get_stack_output(
            stack_identifier, 'trunk_parent_port')
        parent_port = self.network_client.show_port(parent_id)['port']
        trunk_sub_ports = parent_port['trunk_details']['sub_ports']

        self.assertNotEqual(
            self._sub_ports_dict_to_set([sub_port_to_be_removed]),
            self._sub_ports_dict_to_set(trunk_sub_ports))
        self.assertFalse(trunk_sub_ports,
                         'The returned sub ports (%s) in trunk_details is '
                         'not empty!' % trunk_sub_ports)

    def test_update_existing_sub_port_on_trunk(self):
        """Change the segmentation_id of an already attached sub port."""
        sub_ports = [{'port': {'get_resource': 'sub_port_one'},
                      'segmentation_type': 'vlan',
                      'segmentation_id': 10}]
        parsed_template = yaml.safe_load(test_template)
        parsed_template['resources']['trunk']['properties'][
            'sub_ports'] = sub_ports
        template_with_sub_ports = yaml.safe_dump(parsed_template)
        stack_identifier = self.stack_create(template=template_with_sub_ports)

        sub_port_id = self.get_physical_resource_id(
            stack_identifier, 'sub_port_one')
        parsed_template['resources']['trunk']['properties']['sub_ports'][0][
            'segmentation_id'] = 99
        updated_template = yaml.safe_dump(parsed_template)
        self.update_stack(stack_identifier, updated_template)

        updated_sub_port = {'port': sub_port_id,
                            'segmentation_type': 'vlan',
                            'segmentation_id': 99}
        parent_id = self.get_stack_output(
            stack_identifier, 'trunk_parent_port')
        parent_port = self.network_client.show_port(parent_id)['port']
        trunk_sub_ports = parent_port['trunk_details']['sub_ports']

        self.assertEqual(self._sub_ports_dict_to_set([updated_sub_port]),
                         self._sub_ports_dict_to_set(trunk_sub_ports))

    def test_update_trunk_name_and_description(self):
        """Rename the trunk and set a description via stack update."""
        new_name = 'pineapple'
        new_description = 'This is a test trunk'

        stack_identifier = self.stack_create(template=test_template)
        parsed_template = yaml.safe_load(test_template)
        parsed_template['resources']['trunk']['properties']['name'] = new_name
        parsed_template['resources']['trunk']['properties'][
            'description'] = new_description
        updated_template = yaml.safe_dump(parsed_template)
        self.update_stack(stack_identifier, template=updated_template)

        # Look up the trunk through its parent port's trunk_details.
        parent_id = self.get_stack_output(
            stack_identifier, 'trunk_parent_port')
        parent_port = self.network_client.show_port(parent_id)['port']
        trunk_id = parent_port['trunk_details']['trunk_id']

        trunk = self.network_client.show_trunk(trunk_id)['trunk']
        self.assertEqual(new_name, trunk['name'])
        self.assertEqual(new_description, trunk['description'])

View File

@ -1,65 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
class EncryptedParametersTest(functional_base.FunctionalTestsBase):
    """Verify a hidden (encrypted) parameter round-trips through an output."""

    template = '''
heat_template_version: 2014-10-16
parameters:
  image:
    type: string
  flavor:
    type: string
  network:
    type: string
  foo:
    type: string
    description: 'parameter with encryption turned on'
    hidden: true
    default: secret
resources:
  server_with_encrypted_property:
    type: OS::Nova::Server
    properties:
      name: { get_param: foo }
      image: { get_param: image }
      flavor: { get_param: flavor }
      networks: [{network: {get_param: network} }]
outputs:
  encrypted_foo_param:
    description: 'encrypted param'
    value: { get_param: foo }
'''

    def test_db_encryption(self):
        """The decrypted value of 'foo' must be visible in the output."""
        # Create a stack with the value of 'foo' to be encrypted.
        foo_param = 'my_encrypted_foo'
        stack_identifier = self.stack_create(
            template=self.template,
            parameters={
                'image': self.conf.minimal_image_ref,
                'flavor': self.conf.minimal_instance_type,
                'network': self.conf.fixed_network_name,
                'foo': foo_param,
            },
        )
        stack = self.client.stacks.get(stack_identifier)

        # Verify the output value for the 'foo' parameter.
        for out in stack.outputs:
            if out['output_key'] != 'encrypted_foo_param':
                continue
            self.assertEqual(foo_param, out['output_value'])

View File

@ -1,87 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
# Template (as a Python dict) defining a LUKS volume type plus an encrypted
# volume type layered on top of it. Note: test_create_update mutates the
# encrypted type's properties in this dict in place between stack updates.
test_encryption_vol_type = {
    'heat_template_version': '2015-04-30',
    'description': 'Test template to create encryption volume type.',
    'resources': {
        'my_volume_type': {
            'type': 'OS::Cinder::VolumeType',
            'properties': {
                'name': 'LUKS'
            }
        },
        'my_encrypted_vol_type': {
            'type': 'OS::Cinder::EncryptedVolumeType',
            'properties': {
                'provider': 'nova.volume.encryptors.luks.LuksEncryptor',
                'control_location': 'front-end',
                'cipher': 'aes-xts-plain64',
                'key_size': 512,
                'volume_type': {'get_resource': 'my_volume_type'}
            }
        }
    }
}
class EncryptionVolTypeTest(functional_base.FunctionalTestsBase):
    """Create/update an OS::Cinder::EncryptedVolumeType (admin only)."""

    def setUp(self):
        super(EncryptionVolTypeTest, self).setUp()
        if not self.conf.admin_username or not self.conf.admin_password:
            self.skipTest('No admin creds found, skipping')
        # cinder security policy usage of volume type is limited
        # to being used by administrators only.
        # Switch to admin
        self.setup_clients_for_admin()

    def check_stack(self, sid):
        """Assert the stack's resources and the physical encryption specs."""
        # check if only two resources are present.
        expected_resources = {
            'my_volume_type': 'OS::Cinder::VolumeType',
            'my_encrypted_vol_type': 'OS::Cinder::EncryptedVolumeType'}
        self.assertEqual(expected_resources, self.list_resources(sid))

        template_props = test_encryption_vol_type[
            'resources']['my_encrypted_vol_type']['properties']

        # check if the phy rsrc specs was created in accordance with template.
        resource = self.client.resources.get(sid, 'my_encrypted_vol_type')
        physical_specs = self.volume_client.volume_encryption_types.get(
            resource.physical_resource_id)
        for prop in ('key_size', 'provider', 'cipher', 'control_location'):
            self.assertEqual(template_props[prop],
                             getattr(physical_specs, prop))

    def test_create_update(self):
        """Create the stack, then update after mutating the template dict."""
        sid = self.stack_create(template=test_encryption_vol_type)
        self.check_stack(sid)

        # Change some properties and trigger update.
        props = test_encryption_vol_type[
            'resources']['my_encrypted_vol_type']['properties']
        props['key_size'] = 256
        props['cipher'] = 'aes-cbc-essiv'
        self.update_stack(sid, test_encryption_vol_type)
        self.check_stack(sid)

View File

@ -1,149 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
server_with_sub_fixed_ip_template = '''
heat_template_version: 2016-04-08
description: Test template to test nova server with subnet and fixed_ip.
parameters:
flavor:
type: string
image:
type: string
resources:
net:
type: OS::Neutron::Net
properties:
name: my_net
subnet:
type: OS::Neutron::Subnet
properties:
network: {get_resource: net}
cidr: 11.11.11.0/24
security_group:
type: OS::Neutron::SecurityGroup
properties:
name: the_sg
description: Ping and SSH
rules:
- protocol: icmp
- protocol: tcp
port_range_min: 22
port_range_max: 22
server:
type: OS::Nova::Server
properties:
image: {get_param: image}
flavor: {get_param: flavor}
networks:
- subnet: {get_resource: subnet}
fixed_ip: 11.11.11.11
security_groups:
- {get_resource: security_group}
outputs:
networks:
value: {get_attr: [server, networks]}
'''
server_with_port_template = '''
heat_template_version: 2016-04-08
description: Test template to test nova server with port.
parameters:
flavor:
type: string
image:
type: string
resources:
net:
type: OS::Neutron::Net
properties:
name: server_with_port_net
subnet:
type: OS::Neutron::Subnet
properties:
network: {get_resource: net}
cidr: 11.11.11.0/24
port:
type: OS::Neutron::Port
properties:
network: {get_resource: net}
fixed_ips:
- subnet: {get_resource: subnet}
ip_address: 11.11.11.11
server:
type: OS::Nova::Server
properties:
image: {get_param: image}
flavor: {get_param: flavor}
networks:
- port: {get_resource: port}
'''
class CreateServerTest(functional_base.FunctionalTestsBase):
    """Create/update OS::Nova::Server wired up via subnet, fixed IP or port."""

    def get_outputs(self, stack_identifier, output_key):
        # Resolve a single stack output by key.
        return self._stack_output(
            self.client.stacks.get(stack_identifier), output_key)

    def test_create_server_with_subnet_fixed_ip_sec_group(self):
        params = {'flavor': self.conf.minimal_instance_type,
                  'image': self.conf.minimal_image_ref}
        sid = self.stack_create(
            template=server_with_sub_fixed_ip_template,
            stack_name='server_with_sub_ip',
            parameters=params)
        # The requested fixed IP must appear on the requested network.
        self.assertEqual(['11.11.11.11'],
                         self.get_outputs(sid, 'networks')['my_net'])
        server_id = self.client.resources.get(
            sid, 'server').physical_resource_id
        nova_server = self.compute_client.servers.get(server_id)
        self.assertEqual([{"name": "the_sg"}], nova_server.security_groups)

    def test_create_update_server_with_subnet(self):
        params = {'flavor': self.conf.minimal_instance_type,
                  'image': self.conf.minimal_image_ref}
        tmpl = server_with_sub_fixed_ip_template.replace(
            'fixed_ip: 11.11.11.11',
            'fixed_ip: 11.11.11.22').replace(
                'name: my_net', 'name: your_net')
        sid = self.stack_create(
            template=tmpl,
            stack_name='create_server_with_sub_ip',
            parameters=params)
        self.assertEqual(['11.11.11.22'],
                         self.get_outputs(sid, 'networks')['your_net'])

        # Update the server with only the subnet: we won't pass both
        # port_id and net_id when attaching the interface, so the update
        # succeeds and the server gets a different address.
        self.update_stack(sid,
                          tmpl.replace('fixed_ip: 11.11.11.22', ''),
                          parameters=params)
        self.assertNotEqual(['11.11.11.22'],
                            self.get_outputs(sid, 'networks')['your_net'])

    def test_create_server_with_port(self):
        # Creation succeeding is the whole assertion here; nothing else
        # needs to be checked.
        self.stack_create(
            template=server_with_port_template,
            stack_name='server_with_port',
            parameters={'flavor': self.conf.minimal_instance_type,
                        'image': self.conf.minimal_image_ref})

View File

@ -1,107 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
class OSWaitCondition(functional_base.FunctionalTestsBase):
    """Verify OS::Heat::WaitCondition collects many signals from a server.

    The booted instance drives wc_notify (the handle's curl CLI) with
    plain signals, signals carrying reason/data, duplicate ids and a
    burst of 20 parallel signals; the stack completes only once the
    configured count of 25 signals has arrived.
    """

    template = '''
heat_template_version: 2013-05-23
parameters:
  flavor:
    type: string
  image:
    type: string
  network:
    type: string
  timeout:
    type: number
    default: 60
resources:
  instance1:
    type: OS::Nova::Server
    properties:
      flavor: {get_param: flavor}
      image: {get_param: image}
      networks:
      - network: {get_param: network}
      user_data_format: RAW
      user_data:
        str_replace:
          template: '#!/bin/sh

            wc_notify --data-binary ''{"status": "SUCCESS"}''

            # signals with reason

            wc_notify --data-binary ''{"status": "SUCCESS", "reason":
            "signal2"}''

            # signals with data

            wc_notify --data-binary ''{"status": "SUCCESS", "reason":
            "signal3", "data": "data3"}''

            wc_notify --data-binary ''{"status": "SUCCESS", "reason":
            "signal4", "data": "data4"}''

            # check signals with the same number

            wc_notify --data-binary ''{"status": "SUCCESS", "id": "5"}''

            wc_notify --data-binary ''{"status": "SUCCESS", "id": "5"}''

            # loop for 20 signals without reasons and data

            for i in `seq 1 20`; do wc_notify --data-binary ''{"status":
            "SUCCESS"}'' & done

            wait
            '
          params:
            wc_notify:
              get_attr: [wait_handle, curl_cli]
  wait_condition:
    type: OS::Heat::WaitCondition
    depends_on: instance1
    properties:
      count: 25
      handle: {get_resource: wait_handle}
      timeout: {get_param: timeout}
  wait_handle:
    type: OS::Heat::WaitConditionHandle
outputs:
  curl_cli:
    value:
      get_attr: [wait_handle, curl_cli]
  wc_data:
    value:
      get_attr: [wait_condition, data]
'''

    def setUp(self):
        super(OSWaitCondition, self).setUp()
        # The test boots a real instance, so both a minimal image and a
        # minimal flavor must be configured.
        if not self.conf.minimal_image_ref:
            raise self.skipException("No minimal image configured to test")
        if not self.conf.minimal_instance_type:
            raise self.skipException("No minimal flavor configured to test")

    def test_create_stack_with_multi_signal_waitcondition(self):
        # Stack creation only completes if all signals are received
        # within the 120 second timeout.
        params = {'flavor': self.conf.minimal_instance_type,
                  'image': self.conf.minimal_image_ref,
                  'network': self.conf.fixed_network_name,
                  'timeout': 120}
        self.stack_create(template=self.template, parameters=params)

View File

@ -1,144 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient import exc
import six
from heat_integrationtests.functional import functional_base
class RemoteStackTest(functional_base.FunctionalTestsBase):
    """Tests OS::Heat::Stack with a context pointing at another region.

    The parent stack wraps ``remote_stack.yaml`` (supplied via ``files``)
    in an OS::Heat::Stack resource; the remote stack holds one
    OS::Heat::RandomString and exposes its value as ``remote_key``.
    """

    # Parent template; the hard-coded region is swapped for
    # self.conf.region in setUp().
    template = '''
heat_template_version: 2013-05-23
resources:
  my_stack:
    type: OS::Heat::Stack
    properties:
      context:
        region_name: RegionOne
      template:
        get_file: remote_stack.yaml
outputs:
  key:
    value: {get_attr: [my_stack, outputs]}
'''

    # Template launched in the remote region.
    remote_template = '''
heat_template_version: 2013-05-23
resources:
  random1:
    type: OS::Heat::RandomString
outputs:
  remote_key:
    value: {get_attr: [random1, value]}
'''

    def setUp(self):
        super(RemoteStackTest, self).setUp()
        # replacing the template region with the one from the config
        self.template = self.template.replace('RegionOne',
                                              self.conf.region)

    def test_remote_stack_alone(self):
        # Sanity check: the remote template also works when created
        # directly, without the wrapping OS::Heat::Stack.
        stack_id = self.stack_create(template=self.remote_template)
        expected_resources = {'random1': 'OS::Heat::RandomString'}
        self.assertEqual(expected_resources, self.list_resources(stack_id))
        stack = self.client.stacks.get(stack_id)
        output_value = self._stack_output(stack, 'remote_key')
        # The generated random value is expected to be 32 characters.
        self.assertEqual(32, len(output_value))

    def test_stack_create(self):
        files = {'remote_stack.yaml': self.remote_template}
        stack_id = self.stack_create(files=files)
        expected_resources = {'my_stack': 'OS::Heat::Stack'}
        self.assertEqual(expected_resources, self.list_resources(stack_id))
        stack = self.client.stacks.get(stack_id)
        output = self._stack_output(stack, 'key')
        parent_output_value = output['remote_key']
        self.assertEqual(32, len(parent_output_value))
        # The remote stack must exist under its own id and report the
        # same output value the parent exposes.
        rsrc = self.client.resources.get(stack_id, 'my_stack')
        remote_id = rsrc.physical_resource_id
        rstack = self.client.stacks.get(remote_id)
        self.assertEqual(remote_id, rstack.id)
        remote_output_value = self._stack_output(rstack, 'remote_key')
        self.assertEqual(32, len(remote_output_value))
        self.assertEqual(parent_output_value, remote_output_value)
        remote_resources = {'random1': 'OS::Heat::RandomString'}
        self.assertEqual(remote_resources, self.list_resources(remote_id))

    def test_stack_create_bad_region(self):
        # An unknown region must fail fast with a clear error message.
        tmpl_bad_region = self.template.replace(self.conf.region, 'DARKHOLE')
        files = {'remote_stack.yaml': self.remote_template}
        kwargs = {
            'template': tmpl_bad_region,
            'files': files
        }
        ex = self.assertRaises(exc.HTTPBadRequest, self.stack_create, **kwargs)
        error_msg = ('ERROR: Cannot establish connection to Heat endpoint '
                     'at region "DARKHOLE" due to "publicURL endpoint for '
                     'orchestration service in DARKHOLE region not found"')
        self.assertEqual(error_msg, six.text_type(ex))

    def test_stack_resource_validation_fail(self):
        # Remote-side template validation errors must propagate, with
        # the region named in the message.
        tmpl_bad_format = self.remote_template.replace('resources', 'resource')
        files = {'remote_stack.yaml': tmpl_bad_format}
        kwargs = {'files': files}
        ex = self.assertRaises(exc.HTTPBadRequest, self.stack_create, **kwargs)
        error_msg = ('ERROR: Failed validating stack template using Heat '
                     'endpoint at region "%s" due to '
                     '"ERROR: The template section is '
                     'invalid: resource"') % self.conf.region
        self.assertEqual(error_msg, six.text_type(ex))

    def test_stack_update(self):
        files = {'remote_stack.yaml': self.remote_template}
        stack_id = self.stack_create(files=files)
        expected_resources = {'my_stack': 'OS::Heat::Stack'}
        self.assertEqual(expected_resources, self.list_resources(stack_id))
        rsrc = self.client.resources.get(stack_id, 'my_stack')
        physical_resource_id = rsrc.physical_resource_id
        rstack = self.client.stacks.get(physical_resource_id)
        self.assertEqual(physical_resource_id, rstack.id)
        remote_resources = {'random1': 'OS::Heat::RandomString'}
        self.assertEqual(remote_resources,
                         self.list_resources(rstack.id))
        # do an update
        update_template = self.remote_template.replace('random1', 'random2')
        files = {'remote_stack.yaml': update_template}
        self.update_stack(stack_id, self.template, files=files)

        # check if the remote stack is still there with the same ID
        self.assertEqual(expected_resources, self.list_resources(stack_id))
        rsrc = self.client.resources.get(stack_id, 'my_stack')
        physical_resource_id = rsrc.physical_resource_id
        rstack = self.client.stacks.get(physical_resource_id)
        self.assertEqual(physical_resource_id, rstack.id)
        remote_resources = {'random2': 'OS::Heat::RandomString'}
        self.assertEqual(remote_resources,
                         self.list_resources(rstack.id))

    def test_stack_suspend_resume(self):
        # Suspend/resume must recurse into the remote stack without error.
        files = {'remote_stack.yaml': self.remote_template}
        stack_id = self.stack_create(files=files)
        self.stack_suspend(stack_id)
        self.stack_resume(stack_id)

View File

@ -49,7 +49,6 @@ function _config_iniset {
iniset $conf_file heat_plugin minimal_instance_type m1.heat_micro
iniset $conf_file heat_plugin image_ref Fedora-Cloud-Base-26-1.5.x86_64
iniset $conf_file heat_plugin boot_config_env $DEST/heat-templates/hot/software-config/boot-config/test_image_env.yaml
iniset $conf_file heat_plugin minimal_image_ref cirros-0.3.5-x86_64-disk
# Skip ReloadOnSighupTest. Most jobs now run with apache+uwsgi, so the test has no significance
@ -57,10 +56,6 @@ function _config_iniset {
# Skip StackCancelTest till the python-heatclient is bumped
iniset $conf_file heat_plugin skip_functional_test_list 'ReloadOnSighupTest, NotificationTest, StackCancelTest'
# Skip VolumeBackupRestoreIntegrationTest skipped until failure rate can be reduced ref bug #1382300
# Skip test_server_signal_userdata_format_software_config is skipped untill bug #1651768 is resolved
iniset $conf_file heat_plugin skip_scenario_test_list 'SoftwareConfigIntegrationTest, VolumeBackupRestoreIntegrationTest'
if [ "$DISABLE_CONVERGENCE" == "true" ]; then
iniset $conf_file heat_plugin convergence_engine_enabled false
fi
@ -80,6 +75,12 @@ function _config_tempest_plugin
iniset_multiline $conf_file service_available heat_plugin True
_config_iniset $conf_file
iniset $conf_file heat_plugin heat_config_notify_script $DEST/heat-templates/hot/software-config/elements/heat-config/bin/heat-config-notify
iniset $conf_file heat_plugin boot_config_env $DEST/heat-templates/hot/software-config/boot-config/test_image_env.yaml
# Skip VolumeBackupRestoreIntegrationTest skipped until failure rate can be reduced ref bug #1382300
# Skip test_server_signal_userdata_format_software_config is skipped untill bug #1651768 is resolved
iniset $conf_file heat_plugin skip_scenario_test_list 'SoftwareConfigIntegrationTest, VolumeBackupRestoreIntegrationTest'
cat $conf_file
}

View File

@ -1,63 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import reflection
from heat_integrationtests.common import test
class ScenarioTestsBase(test.HeatIntegrationTest):
    """This class defines common parameters for scenario tests."""

    def setUp(self):
        super(ScenarioTestsBase, self).setUp()
        self.check_skip()
        self.sub_dir = 'templates'
        self.assign_keypair()

        # Scenario tests boot real servers, so a network, an image and a
        # flavor must all be configured.
        if not self.conf.fixed_network_name:
            raise self.skipException("No default network configured to test")
        self.net = self._get_network()
        if not self.conf.minimal_image_ref:
            raise self.skipException("No minimal image configured to test")
        if not self.conf.minimal_instance_type:
            raise self.skipException("No minimal flavor configured to test")

    def launch_stack(self, template_name, expected_status='CREATE_COMPLETE',
                     parameters=None, **kwargs):
        """Create a stack from a template file in the scenario sub dir."""
        tmpl = self._load_template(__file__, template_name, self.sub_dir)
        params = parameters or {}
        extra = kwargs.get('add_parameters')
        if extra:
            params.update(extra)
        return self.stack_create(
            stack_name=kwargs.get('stack_name'),
            template=tmpl,
            files=kwargs.get('files'),
            parameters=params,
            environment=kwargs.get('environment'),
            expected_status=expected_status
        )

    def check_skip(self):
        """Skip when conf disables scenario tests or lists this test."""
        cls_name = reflection.get_class_name(self, fully_qualified=False)
        qualified_name = '.'.join([cls_name, self._testMethodName])
        skip_list = self.conf.skip_scenario_test_list
        listed = skip_list and (cls_name in skip_list or
                                qualified_name in skip_list)
        if self.conf.skip_scenario_tests or listed:
            self.skipTest('Test disabled in conf, skipping')

View File

@ -1,65 +0,0 @@
# App server: boots one Nova server, waits for it to signal readiness
# through a wait condition, then registers it as a Neutron pool member
# on app_port.
heat_template_version: 2015-10-15

description: |
  App server that is a member of Neutron Pool.

parameters:
  image:
    type: string
  flavor:
    type: string
  net:
    type: string
  sec_group:
    type: string
  pool_id:
    type: string
  app_port:
    type: number
  timeout:
    type: number

resources:
  config:
    # User-data comes from the environment-provided web-app config
    # (resource type mapped in the test environment).
    type: OS::Test::WebAppConfig
    properties:
      app_port: { get_param: app_port }
      wc_curl_cli: { get_attr: [ handle, curl_cli ] }

  server:
    type: OS::Nova::Server
    properties:
      image: { get_param: image }
      flavor: { get_param: flavor }
      networks:
        - network: { get_param: net }
      security_groups:
        - { get_param: sec_group }
      user_data_format: RAW
      user_data: { get_resource: config }

  handle:
    type: OS::Heat::WaitConditionHandle

  waiter:
    # Blocks until the booted server reports SUCCESS via wc_curl_cli.
    type: OS::Heat::WaitCondition
    depends_on: server
    properties:
      timeout: { get_param: timeout }
      handle: { get_resource: handle }

  pool_member:
    # Only register the server once it is known to be up.
    type: OS::Neutron::PoolMember
    depends_on: waiter
    properties:
      address: { get_attr: [ server, networks, { get_param: net }, 0 ] }
      pool_id: { get_param: pool_id }
      protocol_port: { get_param: app_port }

View File

@ -1,5 +0,0 @@
# Defines a Heat::InstallConfigAgent config resource which performs no config.
# This environment can be used when the image already has the required agents
# installed and configured.
resource_registry:
"Heat::InstallConfigAgent": "OS::Heat::SoftwareConfig"

View File

@ -1,35 +0,0 @@
heat_template_version: 2015-10-15
description: |
Simplest web-app using netcat reporting only hostname.
Specifically tailored for minimal Cirros image.
parameters:
app_port:
type: number
wc_curl_cli:
type: string
resources:
webapp_nc:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: |
#! /bin/sh -v
Body=$(hostname)
Response="HTTP/1.1 200 OK\r\nContent-Length: ${#Body}\r\n\r\n$Body"
wc_notify --data-binary '{"status": "SUCCESS"}'
while true ; do echo -e $Response | nc -llp PORT; done
params:
PORT: { get_param: app_port }
wc_notify: { get_param: wc_curl_cli }
outputs:
OS::stack_id:
value: { get_resource: webapp_nc }

View File

@ -1,113 +0,0 @@
# Autoscaling group of app servers (OS::Test::NeutronAppServer provider
# resources) load-balanced by a Neutron LBaaS v1 pool, reachable via a
# floating IP; scale_up/scale_down policies adjust capacity by +/-1.
heat_template_version: 2015-04-30

description: |
  Template which tests Neutron load balancing requests to members of
  Heat AutoScalingGroup.
  Instances must be running some webserver on a given app_port
  producing HTTP response that is different between servers
  but stable over time for given server.

parameters:
  flavor:
    type: string
  image:
    type: string
  net:
    type: string
  subnet:
    type: string
  public_net:
    type: string
  app_port:
    type: number
    default: 8080
  lb_port:
    type: number
    default: 80
  timeout:
    type: number
    default: 600

resources:
  sec_group:
    type: OS::Neutron::SecurityGroup
    properties:
      rules:
      - remote_ip_prefix: 0.0.0.0/0
        protocol: tcp
        port_range_min: { get_param: app_port }
        port_range_max: { get_param: app_port }

  asg:
    type: OS::Heat::AutoScalingGroup
    properties:
      desired_capacity: 1
      max_size: 2
      min_size: 1
      resource:
        type: OS::Test::NeutronAppServer
        properties:
          image: { get_param: image }
          flavor: { get_param: flavor }
          net: { get_param: net}
          sec_group: { get_resource: sec_group }
          app_port: { get_param: app_port }
          pool_id: { get_resource: pool }
          timeout: { get_param: timeout }

  scale_up:
    type: OS::Heat::ScalingPolicy
    properties:
      adjustment_type: change_in_capacity
      auto_scaling_group_id: { get_resource: asg }
      scaling_adjustment: 1

  scale_down:
    type: OS::Heat::ScalingPolicy
    properties:
      adjustment_type: change_in_capacity
      auto_scaling_group_id: { get_resource: asg }
      scaling_adjustment: -1

  health_monitor:
    type: OS::Neutron::HealthMonitor
    properties:
      delay: 3
      type: HTTP
      timeout: 3
      max_retries: 3

  pool:
    type: OS::Neutron::Pool
    properties:
      lb_method: ROUND_ROBIN
      protocol: HTTP
      subnet: { get_param: subnet }
      monitors:
      - { get_resource: health_monitor }
      vip:
        protocol_port: { get_param: lb_port }

  floating_ip:
    # Make the VIP reachable from outside via the public network.
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: { get_param: public_net }
      port_id:
        { get_attr: [pool, vip, 'port_id'] }

  loadbalancer:
    type: OS::Neutron::LoadBalancer
    properties:
      pool_id: { get_resource: pool }
      protocol_port: { get_param: app_port }

outputs:
  lburl:
    description: URL of the loadbalanced app
    value:
      str_replace:
        template: http://IP_ADDRESS:PORT
        params:
          IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] }
          PORT: { get_param: lb_port }

View File

@ -1,97 +0,0 @@
# CFN-format template: boots a server whose cfn-init writes a status
# file and whose cfn-signal reports it back through a wait condition.
HeatTemplateFormatVersion: '2012-12-12'
Description: |
  Template which uses a wait condition to confirm that a minimal
  cfn-init and cfn-signal has worked
Parameters:
  key_name:
    Type: String
  flavor:
    Type: String
  image:
    Type: String
  subnet:
    Type: String
  timeout:
    Type: Number
Resources:
  CfnUser:
    Type: AWS::IAM::User
  SmokeSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Enable only ping and SSH access
      SecurityGroupIngress:
      - {CidrIp: 0.0.0.0/0, FromPort: '-1', IpProtocol: icmp, ToPort: '-1'}
      - {CidrIp: 0.0.0.0/0, FromPort: '22', IpProtocol: tcp, ToPort: '22'}
  SmokeKeys:
    Type: AWS::IAM::AccessKey
    Properties:
      UserName: {Ref: CfnUser}
  ElasticIp:
    Type: AWS::EC2::EIP
    Properties:
      Domain: vpc
  SmokeServerElasticIp:
    Type: AWS::EC2::EIPAssociation
    Properties:
      EIP: {Ref: ElasticIp}
      InstanceId: {Ref: SmokeServer}
  SmokeServer:
    Type: AWS::EC2::Instance
    Metadata:
      # cfn-init materialises these files on first boot; the credentials
      # file lets cfn-signal authenticate against the CFN API.
      AWS::CloudFormation::Init:
        config:
          files:
            /tmp/smoke-status:
              content: smoke test complete
            /etc/cfn/cfn-credentials:
              content:
                Fn::Replace:
                - SmokeKeys: {Ref: SmokeKeys}
                  SecretAccessKey:
                    'Fn::GetAtt': [SmokeKeys, SecretAccessKey]
                - |
                  AWSAccessKeyId=SmokeKeys
                  AWSSecretKey=SecretAccessKey
              mode: '000400'
              owner: root
              group: root
    Properties:
      ImageId: {Ref: image}
      InstanceType: {Ref: flavor}
      KeyName: {Ref: key_name}
      SubnetId: {Ref: subnet}
      SecurityGroups:
      - {Ref: SmokeSecurityGroup}
      UserData:
        Fn::Replace:
        - WaitHandle: {Ref: WaitHandle}
        - |
          #!/bin/bash -v
          /opt/aws/bin/cfn-init
          /opt/aws/bin/cfn-signal -e 0 --data "`cat /tmp/smoke-status`" \
              --id smoke_status "WaitHandle"
  WaitHandle:
    Type: AWS::CloudFormation::WaitConditionHandle
  WaitCondition:
    Type: AWS::CloudFormation::WaitCondition
    DependsOn: SmokeServer
    Properties:
      Handle: {Ref: WaitHandle}
      Timeout: {Ref: timeout}
Outputs:
  WaitConditionStatus:
    Description: Contents of /tmp/smoke-status on SmokeServer
    Value:
      Fn::GetAtt: [WaitCondition, Data]
  ElasticIp_Id:
    Description: Elastic ip allocation id
    Value:
      Fn::GetAtt: [ElasticIp, AllocationId]
  SmokeServerElasticIp:
    Description: Elastic ip address of server
    Value:
      Ref: ElasticIp

View File

@ -1,107 +0,0 @@
# Creates an isolated net/subnet/router, boots a server on it behind a
# floating IP and verifies signalling works via a wait condition.
heat_template_version: 2013-05-23
description: |
  Template which uses a wait condition to confirm that a minimal
  signalling works in a created network
parameters:
  key_name:
    type: string
  flavor:
    type: string
  image:
    type: string
  subnet_cidr:
    type: string
    default: 10.100.0.0/16
  timeout:
    type: number
  public_net:
    type: string
    default: public
  private_net:
    type: string
    default: heat-net
  dns_servers:
    type: comma_delimited_list
    default: ["8.8.8.8", "8.8.4.4"]
  user_data_format:
    type: string
    default: RAW
resources:
  sg:
    type: OS::Neutron::SecurityGroup
    properties:
      name: the_sg
      description: Ping and SSH
      rules:
      - protocol: icmp
      - protocol: tcp
        port_range_min: 22
        port_range_max: 22
  floating_ip:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: {get_param: public_net}
  network:
    type: OS::Neutron::Net
  subnet:
    type: OS::Neutron::Subnet
    properties:
      network: {get_resource: network}
      ip_version: 4
      cidr: {get_param: subnet_cidr}
      dns_nameservers: {get_param: dns_servers}
  router:
    # Uplink to the public network so the signal can reach Heat.
    type: OS::Neutron::Router
    properties:
      external_gateway_info:
        network: {get_param: public_net}
  router_interface:
    type: OS::Neutron::RouterInterface
    properties:
      router: {get_resource: router}
      subnet: {get_resource: subnet}
  wait_handle:
    type: OS::Heat::WaitConditionHandle
  server:
    type: OS::Nova::Server
    properties:
      image: {get_param: image}
      flavor: {get_param: flavor}
      key_name: {get_param: key_name}
      networks:
      - subnet: {get_resource: subnet}
      security_groups:
      - {get_resource: sg}
      user_data_format: {get_param: user_data_format}
      user_data:
        str_replace:
          template: |
            #!/bin/sh
            wc_notify --data-binary '{"status": "SUCCESS", "data": "test complete"}'
          params:
            wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
  server_floating_ip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: {get_resource: floating_ip}
      port_id: {get_attr: [server, addresses, {get_resource: network}, 0, port]}
  wait_condition:
    type: OS::Heat::WaitCondition
    properties:
      handle: {get_resource: wait_handle}
      timeout: {get_param: timeout}
outputs:
  server_ip:
    value: {get_attr: [floating_ip, floating_ip_address]}
  wc_data:
    value: {get_attr: [wait_condition, data]}

View File

@ -1,173 +0,0 @@
# Exercises SoftwareConfig/StructuredConfig deployments of the script,
# cfn-init and puppet groups against one server, exposing each
# deployment's result/stdout/stderr/status_code as outputs.
heat_template_version: 2014-10-16
parameters:
  key_name:
    type: string
  flavor:
    type: string
  image:
    type: string
  network:
    type: string
  signal_transport:
    type: string
    default: CFN_SIGNAL
  software_config_transport:
    type: string
    default: POLL_SERVER_CFN
  dep1_foo:
    default: fooooo
    type: string
  dep1_bar:
    default: baaaaa
    type: string
  dep2a_bar:
    type: string
    default: barrr
  dep3_foo:
    default: fo
    type: string
  dep3_bar:
    default: ba
    type: string
resources:
  the_sg:
    type: OS::Neutron::SecurityGroup
    properties:
      name: the_sg
      description: Ping and SSH
      rules:
      - protocol: icmp
      - protocol: tcp
        port_range_min: 22
        port_range_max: 22
  cfg1:
    # Shell-script config; body shipped alongside as cfg1.sh.
    type: OS::Heat::SoftwareConfig
    properties:
      group: script
      inputs:
      - name: foo
      - name: bar
      outputs:
      - name: result
      config: {get_file: cfg1.sh}
  cfg2a:
    # cfn-init structured config writing /tmp/cfn-init-foo from input.
    type: OS::Heat::StructuredConfig
    properties:
      group: cfn-init
      inputs:
      - name: bar
      config:
        config:
          files:
            /tmp/cfn-init-foo:
              content:
                get_input: bar
              mode: '000644'
  cfg2b:
    # Script that reads back what cfg2a wrote, proving ordering.
    type: OS::Heat::SoftwareConfig
    properties:
      group: script
      outputs:
      - name: result
      config: |
        #!/bin/sh
        echo -n "The file /tmp/cfn-init-foo contains `cat /tmp/cfn-init-foo` for server $deploy_server_id during $deploy_action" > $heat_outputs_path.result
  cfg3:
    # Puppet config; manifest shipped alongside as cfg3.pp.
    type: OS::Heat::SoftwareConfig
    properties:
      group: puppet
      inputs:
      - name: foo
      - name: bar
      outputs:
      - name: result
      config: {get_file: cfg3.pp}
  dep1:
    type: OS::Heat::SoftwareDeployment
    properties:
      config:
        get_resource: cfg1
      server:
        get_resource: server
      input_values:
        foo: {get_param: dep1_foo}
        bar: {get_param: dep1_bar}
      signal_transport: {get_param: signal_transport}
  dep2a:
    # Name prefixes (10_/20_) order dep2a before dep2b on the server.
    type: OS::Heat::StructuredDeployment
    properties:
      name: 10_dep2a
      signal_transport: NO_SIGNAL
      config:
        get_resource: cfg2a
      server:
        get_resource: server
      input_values:
        bar: {get_param: dep2a_bar}
  dep2b:
    type: OS::Heat::SoftwareDeployment
    properties:
      name: 20_dep2b
      config:
        get_resource: cfg2b
      server:
        get_resource: server
      signal_transport: {get_param: signal_transport}
  dep3:
    type: OS::Heat::SoftwareDeployment
    properties:
      config:
        get_resource: cfg3
      server:
        get_resource: server
      input_values:
        foo: {get_param: dep3_foo}
        bar: {get_param: dep3_bar}
      signal_transport: {get_param: signal_transport}
  cfg_user_data:
    # Provided by the test environment; yields agent-bootstrap user_data.
    type: Heat::InstallConfigAgent
  server:
    type: OS::Nova::Server
    properties:
      image: {get_param: image}
      flavor: {get_param: flavor}
      key_name: {get_param: key_name}
      security_groups:
      - {get_resource: the_sg}
      networks:
      - network: {get_param: network}
      user_data_format: SOFTWARE_CONFIG
      software_config_transport: {get_param: software_config_transport}
      user_data: {get_attr: [cfg_user_data, config]}
outputs:
  res1:
    value:
      result: {get_attr: [dep1, result]}
      stdout: {get_attr: [dep1, deploy_stdout]}
      stderr: {get_attr: [dep1, deploy_stderr]}
      status_code: {get_attr: [dep1, deploy_status_code]}
  res2:
    value:
      result: {get_attr: [dep2b, result]}
      stdout: {get_attr: [dep2b, deploy_stdout]}
      stderr: {get_attr: [dep2b, deploy_stderr]}
      status_code: {get_attr: [dep2b, deploy_status_code]}
  res3:
    value:
      result: {get_attr: [dep3, result]}
      stdout: {get_attr: [dep3, deploy_stdout]}
      stderr: {get_attr: [dep3, deploy_stderr]}
      status_code: {get_attr: [dep3, deploy_status_code]}

View File

@ -1,118 +0,0 @@
# Boots a server, attaches a volume restored from backup_id, mounts it
# and reports the contents of /mnt/testfile back via a wait condition.
heat_template_version: 2013-05-23
parameters:
  key_name:
    type: string
    description: keypair to enable SSH access to the instance.
  instance_type:
    type: string
    description: Type of the instance to be created.
  image_id:
    type: string
    description: ID of the image to use for the instance to be created.
  timeout:
    type: number
    description: Stack creation timeout
  dev_name:
    type: string
    description: Expected device name for volume
    default: vdb
  rescan_timeout:
    type: number
    description: Max number of seconds to wait for volume after rescan
    default: 120
  backup_id:
    type: string
    description: backup_id to create volume from
  network:
    type: string
  volume_description:
    type: string
    description: Description of volume
    default: A volume description
resources:
  volume:
    type: OS::Cinder::Volume
    properties:
      backup_id: { get_param: backup_id }
      description: { get_param: volume_description }
  volume_attachment:
    type: OS::Cinder::VolumeAttachment
    properties:
      volume_id: { get_resource: volume }
      instance_uuid: { get_resource: instance }
  instance:
    type: OS::Nova::Server
    properties:
      image: { get_param: image_id }
      flavor: { get_param: instance_type }
      key_name: { get_param: key_name }
      networks:
        - uuid: {get_param: network}
      user_data_format: RAW
      user_data:
        str_replace:
          template: |
            #!/bin/sh
            # Trigger rescan to ensure we see the attached volume
            for i in /sys/class/scsi_host/*; do echo "- - -" > $i/scan; done
            # Wait for the rescan as the volume doesn't appear immediately
            for i in $(seq 1 rescan_timeout)
            do
              grep -q dev_name /proc/partitions && break
              sleep 1
            done
            if grep -q dev_name /proc/partitions
            then
              mount /dev/dev_name /mnt
              TESTDATA=$(cat /mnt/testfile)
              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "SUCCESS", "Reason": "Test Complete", "Data": "Volume Data:'$TESTDATA'", "UniqueId": "instance1"}' "wc_url"
            else
              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "FAILURE", "Reason": "Test Failed", "Data": "Expected device dev_name not found.", "UniqueId": "instance1"}' "wc_url"
            fi
          params:
            wc_url: { get_resource: wait_handle }
            dev_name: { get_param: dev_name }
            rescan_timeout: { get_param: rescan_timeout }
  wait_handle:
    type: OS::Heat::UpdateWaitConditionHandle
  wait_condition:
    type: AWS::CloudFormation::WaitCondition
    properties:
      Count: 1
      Handle: { get_resource: wait_handle }
      Timeout: { get_param: timeout }
outputs:
  status:
    description: status
    value: { get_attr: ['volume', 'status'] }
  size:
    description: size
    value: { get_attr: ['volume', 'size'] }
  display_description:
    description: display_description
    value: { get_attr: ['volume', 'display_description'] }
  volume_id:
    value: { get_resource: volume }
  testfile_data:
    description: Contents of /mnt/testfile from the mounted volume
    value: { get_attr: ['wait_condition', 'Data'] }

View File

@ -1,124 +0,0 @@
# Boots a server, attaches a new volume (deletion_policy: Snapshot),
# formats/mounts it, writes test_string to /mnt/testfile and signals
# completion through a wait condition.
heat_template_version: 2013-05-23
parameters:
  key_name:
    type: string
    description: keypair to enable SSH access to the instance.
  instance_type:
    type: string
    description: Type of the instance to be created.
  image_id:
    type: string
    description: ID of the image to use for the instance to be created.
  timeout:
    type: number
    description: Stack creation timeout
  dev_name:
    type: string
    description: Expected device name for volume
    default: vdb
  test_string:
    type: string
    description: Test string which is written to volume
    default: ateststring
  rescan_timeout:
    type: number
    description: Max number of seconds to wait for volume after rescan
    default: 120
  network:
    type: string
  volume_description:
    type: string
    description: Description of volume
    default: A volume description
  volume_size:
    type: number
    description: Size of volume
    default: 1
resources:
  volume:
    # Snapshot on delete so the written data survives stack deletion.
    deletion_policy: 'Snapshot'
    type: OS::Cinder::Volume
    properties:
      size: {get_param: volume_size}
      description: {get_param: volume_description}
  volume_attachment:
    type: OS::Cinder::VolumeAttachment
    properties:
      volume_id: { get_resource: volume }
      instance_uuid: { get_resource: instance }
  instance:
    type: OS::Nova::Server
    properties:
      image: { get_param: image_id }
      flavor: { get_param: instance_type }
      key_name: { get_param: key_name }
      networks:
        - uuid: {get_param: network}
      user_data_format: RAW
      user_data:
        str_replace:
          template: |
            #!/bin/sh
            # Trigger rescan to ensure we see the attached volume
            for i in /sys/class/scsi_host/*; do echo "- - -" > $i/scan; done
            # Wait for the rescan as the volume doesn't appear immediately
            for i in $(seq 1 rescan_timeout)
            do
              grep -q dev_name /proc/partitions && break
              sleep 1
            done
            if grep -q dev_name /proc/partitions
            then
              mkfs.ext4 /dev/dev_name
              mount /dev/dev_name /mnt
              echo "test_string" > /mnt/testfile
              umount /mnt
              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "SUCCESS", "Reason": "Test Complete", "Data": "Completed volume configuration.", "UniqueId": "instance1"}' "wc_url"
            else
              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "FAILURE", "Reason": "Test Failed", "Data": "Expected device dev_name not found.", "UniqueId": "instance1"}' "wc_url"
            fi
          params:
            wc_url: { get_resource: wait_handle }
            dev_name: { get_param: dev_name }
            rescan_timeout: { get_param: rescan_timeout }
            test_string: { get_param: test_string }
  wait_handle:
    type: OS::Heat::UpdateWaitConditionHandle
  wait_condition:
    type: AWS::CloudFormation::WaitCondition
    properties:
      Count: 1
      Handle: { get_resource: wait_handle }
      Timeout: { get_param: timeout }
outputs:
  status:
    description: status
    value: { get_attr: ['volume', 'status'] }
  size:
    description: size
    value: { get_attr: ['volume', 'size'] }
  display_description:
    description: display_description
    value: { get_attr: ['volume', 'display_description'] }
  volume_id:
    value: { get_resource: volume }

View File

@ -1,110 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import requests
from heat_integrationtests.common import test
from heat_integrationtests.scenario import scenario_base
class AutoscalingLoadBalancerTest(scenario_base.ScenarioTestsBase):
    """Exercise the autoscaling-group + LBaaS v1 scenario.

    Models the very common deployment of several web application
    servers in an autoscaling group behind a single load balancer.
    """

    def setUp(self):
        super(AutoscalingLoadBalancerTest, self).setUp()
        self.template_name = 'test_autoscaling_lb_neutron.yaml'
        self.app_server_template_name = 'app_server_neutron.yaml'
        self.webapp_template_name = 'netcat-webapp.yaml'
        if not self.is_network_extension_supported('lbaas'):
            self.skipTest('LBaas v1 extension not available, skipping')

    def check_num_responses(self, url, expected_num, retries=10):
        """Assert that *url* serves *expected_num* distinct response bodies.

        Polls once per second for up to *retries* attempts, collecting the
        bodies of successful (HTTP 200) responses into a set.
        """
        distinct_bodies = set()
        for _ in range(retries):
            time.sleep(1)
            try:
                reply = requests.get(url, verify=self.verify_cert)
            except requests.exceptions.ConnectionError:
                # The LB may not be up yet, let's retry
                continue
            # skip unsuccessful requests
            if reply.status_code == 200:
                distinct_bodies.add(reply.text)
        self.assertEqual(expected_num, len(distinct_bodies))

    def test_autoscaling_loadbalancer_neutron(self):
        """Check work of AutoScaling and Neutron LBaaS v1 resource in Heat.

        The scenario is the following:

        1. Launch a stack with a load balancer and autoscaling group
           of one server, wait until stack create is complete.
        2. Check that there is only one distinctive response from
           the loadbalanced IP.
        3. Signal the scale_up policy, wait until all resources in
           the autoscaling group are complete.
        4. Check that now there are two distinctive responses from
           the loadbalanced IP.
        """
        parameters = {
            'flavor': self.conf.minimal_instance_type,
            'image': self.conf.minimal_image_ref,
            'net': self.conf.fixed_network_name,
            'subnet': self.conf.fixed_subnet_name,
            'public_net': self.conf.floating_network_name,
            'app_port': 8080,
            'lb_port': 80,
            'timeout': 600,
        }
        files = {
            'appserver.yaml': self._load_template(
                __file__, self.app_server_template_name, self.sub_dir),
            'webapp.yaml': self._load_template(
                __file__, self.webapp_template_name, self.sub_dir),
        }
        env = {
            'resource_registry': {
                'OS::Test::NeutronAppServer': 'appserver.yaml',
                'OS::Test::WebAppConfig': 'webapp.yaml',
            },
        }

        # Launch stack
        sid = self.launch_stack(
            template_name=self.template_name,
            parameters=parameters,
            files=files,
            environment=env,
        )
        stack = self.client.stacks.get(sid)
        lb_url = self._stack_output(stack, 'lburl')

        # Check number of distinctive responses, must be 1
        self.check_num_responses(lb_url, 1)

        # Signal the scaling hook
        self.client.resources.signal(sid, 'scale_up')

        # Wait for AutoScalingGroup update to finish
        asg = self.client.resources.get(sid, 'asg')
        test.call_until_true(self.conf.build_timeout,
                             self.conf.build_interval,
                             self.check_autoscale_complete,
                             asg.physical_resource_id, 2, sid, 'scale_up')

        # Check number of distinctive responses, must now be 2
        self.check_num_responses(lb_url, 2)

View File

@ -1,122 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from heat_integrationtests.common import exceptions
from heat_integrationtests.scenario import scenario_base
class CfnInitIntegrationTest(scenario_base.ScenarioTestsBase):
    """Testing cfn-init and cfn-signal workability."""

    def setUp(self):
        super(CfnInitIntegrationTest, self).setUp()
        if not self.conf.image_ref:
            raise self.skipException("No image configured to test")
        if not self.conf.instance_type:
            raise self.skipException("No flavor configured to test")

    def check_stack(self, sid):
        """Validate every resource of the stack identified by *sid*."""
        # Check status of all resources
        for res_name in ('WaitHandle', 'SmokeSecurityGroup', 'SmokeKeys',
                         'CfnUser', 'SmokeServer', 'SmokeServerElasticIp'):
            self._wait_for_resource_status(
                sid, res_name, 'CREATE_COMPLETE')

        server_id = self.client.resources.get(
            sid, 'SmokeServer').physical_resource_id
        server = self.compute_client.servers.get(server_id)

        try:
            self._wait_for_resource_status(
                sid, 'WaitCondition', 'CREATE_COMPLETE')
        finally:
            # attempt to log the server console regardless of WaitCondition
            # going to complete. This allows successful and failed cloud-init
            # logs to be compared
            self._log_console_output(servers=[server])

        stack = self.client.stacks.get(sid)

        # This is an assert of great significance, as it means the following
        # has happened:
        # - cfn-init read the provided metadata and wrote out a file
        # - a user was created and credentials written to the server
        # - a cfn-signal was built which was signed with provided credentials
        # - the wait condition was fulfilled and the stack has changed state
        wait_status = json.loads(
            self._stack_output(stack, 'WaitConditionStatus'))
        self.assertEqual('smoke test complete', wait_status['smoke_status'])

        # Check EIP attributes.
        server_floatingip_id = self._stack_output(stack, 'ElasticIp_Id')
        self.assertIsNotNone(server_floatingip_id)

        # Fetch EIP details.
        net_show = self.network_client.show_floatingip(
            floatingip=server_floatingip_id)
        floating_ip = net_show['floatingip']['floating_ip_address']
        port_id = net_show['floatingip']['port_id']

        # Ensure that EIP was assigned to server.
        port_show = self.network_client.show_port(port=port_id)
        self.assertEqual(server.id, port_show['port']['device_id'])
        server_ip = self._stack_output(stack, 'SmokeServerElasticIp')
        self.assertEqual(server_ip, floating_ip)

        # Check that created server is reachable
        if not self._ping_ip_address(server_ip):
            self._log_console_output(servers=[server])
            self.fail(
                "Timed out waiting for %s to become reachable" % server_ip)

        # Check that the user can authenticate with the generated keypair
        if self.keypair:
            try:
                linux_client = self.get_remote_client(
                    server_ip, username='ec2-user')
                linux_client.validate_authentication()
            except (exceptions.ServerUnreachable,
                    exceptions.SSHTimeout):
                self._log_console_output(servers=[server])
                raise

    def test_server_cfn_init(self):
        """Check cfn-init and cfn-signal availability on the created server.

        The alternative scenario is the following:

        1. Create a stack with a server and configured security group.
        2. Check that all stack resources were created.
        3. Check that created server is reachable.
        4. Check that stack was created successfully.
        5. Check that is it possible to connect to server
           via generated keypair.
        """
        parameters = {
            'key_name': self.keypair_name,
            'flavor': self.conf.instance_type,
            'image': self.conf.image_ref,
            'timeout': self.conf.build_timeout,
            'subnet': self.net['subnets'][0],
        }

        # Launch stack
        stack_id = self.launch_stack(
            template_name="test_server_cfn_init.yaml",
            parameters=parameters,
            expected_status=None,
        )

        # Check stack
        self.check_stack(stack_id)

View File

@ -1,85 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from heat_integrationtests.common import exceptions
from heat_integrationtests.scenario import scenario_base
class ServerSignalIntegrationTest(scenario_base.ScenarioTestsBase):
    """Test a server in a created network can signal to heat."""

    def _test_server_signal(self, user_data_format='RAW',
                            image=None):
        """Check a server in a created network can signal to heat.

        Boots a server from test_server_signal.yaml, waits for every
        resource to reach CREATE_COMPLETE, then verifies the data the
        server reported through the wait condition and pings the
        server's IP from the stack outputs.
        """
        parameters = {
            'key_name': self.keypair_name,
            'flavor': self.conf.minimal_instance_type,
            'image': image,
            'timeout': self.conf.build_timeout,
            'user_data_format': user_data_format
        }

        # Launch stack
        sid = self.launch_stack(
            template_name="test_server_signal.yaml",
            parameters=parameters,
            expected_status=None
        )

        # Check status of all resources
        for res in ('sg', 'floating_ip', 'network', 'router', 'subnet',
                    'router_interface', 'wait_handle', 'server',
                    'server_floating_ip_assoc'):
            self._wait_for_resource_status(
                sid, res, 'CREATE_COMPLETE')

        server_resource = self.client.resources.get(sid, 'server')
        server_id = server_resource.physical_resource_id
        server = self.compute_client.servers.get(server_id)

        # NOTE: this try block originally carried an
        # "except (StackResourceBuildErrorException, TimeoutException):
        #      raise" clause. A bare re-raise of the caught exception is
        # exactly what happens without any except clause, and the finally
        # block runs either way, so the redundant clause was removed.
        try:
            self._wait_for_resource_status(
                sid, 'wait_condition', 'CREATE_COMPLETE')
        finally:
            # attempt to log the server console regardless of WaitCondition
            # going to complete. This allows successful and failed cloud-init
            # logs to be compared
            self._log_console_output(servers=[server])

        stack = self.client.stacks.get(sid)

        wc_data = json.loads(
            self._stack_output(stack, 'wc_data'))
        self.assertEqual({'1': 'test complete'}, wc_data)

        server_ip = self._stack_output(stack, 'server_ip')

        # Check that created server is reachable
        if not self._ping_ip_address(server_ip):
            self._log_console_output(servers=[server])
            self.fail(
                "Timed out waiting for %s to become reachable" % server_ip)

    def test_server_signal_userdata_format_raw(self):
        self._test_server_signal(image=self.conf.minimal_image_ref)

    def test_server_signal_userdata_format_software_config(self):
        if not self.conf.image_ref:
            raise self.skipException("No image configured to test")
        self._test_server_signal(user_data_format='SOFTWARE_CONFIG',
                                 image=self.conf.image_ref)

View File

@ -1,171 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient.common import template_utils
import six
from heat_integrationtests.scenario import scenario_base
# Shell-script software config used by SoftwareConfigIntegrationTest below
# (deployed to the server as cfg1.sh). It writes $foo into /tmp/$bar,
# records a summary line in $heat_outputs_path.result, and deliberately
# emits one line on stderr so the test can assert on the captured stderr.
CFG1_SH = '''#!/bin/sh
echo "Writing to /tmp/$bar"
echo $foo > /tmp/$bar
echo -n "The file /tmp/$bar contains `cat /tmp/$bar` for server \
$deploy_server_id during $deploy_action" > $heat_outputs_path.result
echo "Written to /tmp/$bar"
echo "Output to stderr" 1>&2
'''
# Puppet-manifest software config (deployed as cfg3.pp): one file resource
# materializes /tmp/$::bar with content $::foo, the other writes the same
# kind of summary line to $::heat_outputs_path.result.
CFG3_PP = '''file {'barfile':
ensure => file,
mode => 0644,
path => "/tmp/$::bar",
content => "$::foo",
}
file {'output_result':
ensure => file,
path => "$::heat_outputs_path.result",
mode => 0644,
content => "The file /tmp/$::bar contains $::foo for server \
$::deploy_server_id during $::deploy_action",
}
'''
class SoftwareConfigIntegrationTest(scenario_base.ScenarioTestsBase):
    """Check software config/deployment resources against a live server."""

    def setUp(self):
        super(SoftwareConfigIntegrationTest, self).setUp()
        if not self.conf.image_ref:
            raise self.skipException("No image configured to test")
        if not self.conf.instance_type:
            raise self.skipException("No flavor configured to test")

    def check_stack(self):
        """Verify resources, deployment metadata and outputs of the stack."""
        sid = self.stack_identifier

        # Check that all stack resources were created
        for res in ('cfg2a', 'cfg2b', 'cfg1', 'cfg3', 'server'):
            self._wait_for_resource_status(
                sid, res, 'CREATE_COMPLETE')

        server_resource = self.client.resources.get(sid, 'server')
        server_id = server_resource.physical_resource_id
        server = self.compute_client.servers.get(server_id)

        # Waiting for each deployment to contribute their
        # config to resource
        try:
            for res in ('dep2b', 'dep1', 'dep3'):
                self._wait_for_resource_status(
                    sid, res, 'CREATE_IN_PROGRESS')

            server_metadata = self.client.resources.metadata(
                sid, 'server')
            deployments = dict((d['name'], d) for d in
                               server_metadata['deployments'])

            for res in ('dep2a', 'dep2b', 'dep1', 'dep3'):
                self._wait_for_resource_status(
                    sid, res, 'CREATE_COMPLETE')
        finally:
            # attempt to log the server console regardless of deployments
            # going to complete. This allows successful and failed boot
            # logs to be compared
            self._log_console_output(servers=[server])

        complete_server_metadata = self.client.resources.metadata(
            sid, 'server')

        # Ensure any previously available deployments haven't changed so
        # config isn't re-triggered
        complete_deployments = dict((d['name'], d) for d in
                                    complete_server_metadata['deployments'])
        for k, v in six.iteritems(deployments):
            self.assertEqual(v, complete_deployments[k])

        stack = self.client.stacks.get(sid)

        res1 = self._stack_output(stack, 'res1')
        self.assertEqual(
            'The file %s contains %s for server %s during %s' % (
                '/tmp/baaaaa', 'fooooo', server_id, 'CREATE'),
            res1['result'])
        self.assertEqual(0, res1['status_code'])
        self.assertEqual('Output to stderr\n', res1['stderr'])
        self.assertGreater(len(res1['stdout']), 0)

        res2 = self._stack_output(stack, 'res2')
        self.assertEqual(
            'The file %s contains %s for server %s during %s' % (
                '/tmp/cfn-init-foo', 'barrr', server_id, 'CREATE'),
            res2['result'])
        self.assertEqual(0, res2['status_code'])
        self.assertEqual('', res2['stderr'])
        self.assertEqual('', res2['stdout'])

        res3 = self._stack_output(stack, 'res3')
        self.assertEqual(
            'The file %s contains %s for server %s during %s' % (
                '/tmp/ba', 'fo', server_id, 'CREATE'),
            res3['result'])
        self.assertEqual(0, res3['status_code'])
        self.assertEqual('', res3['stderr'])
        # Bug fix: this assertion previously read len(res1['stdout']) — an
        # apparent copy/paste slip from the res1 checks above (and a no-op,
        # since res1's stdout was already verified). The intent here is to
        # check res3's stdout.
        self.assertGreater(len(res3['stdout']), 0)

        dep1_resource = self.client.resources.get(sid, 'dep1')
        dep1_id = dep1_resource.physical_resource_id
        dep1_dep = self.client.software_deployments.get(dep1_id)
        if hasattr(dep1_dep, 'updated_time'):
            # Only check updated_time if the attribute exists.
            # This allows latest heat agent code to be tested with
            # Juno heat (which doesn't expose updated_time)
            self.assertIsNotNone(dep1_dep.updated_time)
            self.assertNotEqual(
                dep1_dep.updated_time,
                dep1_dep.creation_time)

    def test_server_software_config(self):
        """Check that passed files with scripts are executed on created server.

        The alternative scenario is the following:

        1. Create a stack and pass files with scripts.
        2. Check that all stack resources are created successfully.
        3. Wait for all deployments.
        4. Check that stack was created.
        5. Check stack outputs.
        """
        parameters = {
            'key_name': self.keypair_name,
            'flavor': self.conf.instance_type,
            'image': self.conf.image_ref,
            'network': self.net['id']
        }

        files = {
            'cfg1.sh': CFG1_SH,
            'cfg3.pp': CFG3_PP
        }

        env_files, env = template_utils.process_environment_and_files(
            self.conf.boot_config_env)

        # Launch stack
        self.stack_identifier = self.launch_stack(
            template_name='test_server_software_config.yaml',
            parameters=parameters,
            files=dict(list(files.items()) + list(env_files.items())),
            expected_status=None,
            environment=env
        )

        # Check stack
        self.check_stack()

View File

@ -1,129 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import exceptions as cinder_exceptions
from oslo_log import log as logging
import six
from heat_integrationtests.common import exceptions
from heat_integrationtests.scenario import scenario_base
LOG = logging.getLogger(__name__)
class VolumeBackupRestoreIntegrationTest(scenario_base.ScenarioTestsBase):
    """Class is responsible for testing of volume backup."""

    def setUp(self):
        super(VolumeBackupRestoreIntegrationTest, self).setUp()
        self.volume_description = 'A test volume description 123'
        self.volume_size = self.conf.volume_size

    def _cinder_verify(self, volume_id, expected_status='available'):
        """Cross-check the volume's details directly against Cinder."""
        self.assertIsNotNone(volume_id)
        volume = self.volume_client.volumes.get(volume_id)
        self.assertIsNotNone(volume)
        self.assertEqual(expected_status, volume.status)
        self.assertEqual(self.volume_size, volume.size)
        self.assertEqual(self.volume_description,
                         volume.display_description)

    def _outputs_verify(self, stack, expected_status='available'):
        """Check that the stack outputs describe the volume as expected."""
        self.assertEqual(expected_status,
                         self._stack_output(stack, 'status'))
        self.assertEqual(six.text_type(self.volume_size),
                         self._stack_output(stack, 'size'))
        self.assertEqual(self.volume_description,
                         self._stack_output(stack, 'display_description'))

    def check_stack(self, stack_id, parameters):
        """Run the delete-with-snapshot / restore-from-backup cycle."""
        stack = self.client.stacks.get(stack_id)

        # Verify with cinder that the volume exists, with matching details
        created_volume_id = self._stack_output(stack, 'volume_id')
        self._cinder_verify(created_volume_id, expected_status='in-use')

        # Verify the stack outputs are as expected
        self._outputs_verify(stack, expected_status='in-use')

        # Delete the stack and ensure a backup is created for volume_id
        # but the volume itself is gone
        self._stack_delete(stack_id)
        self.assertRaises(cinder_exceptions.NotFound,
                          self.volume_client.volumes.get,
                          created_volume_id)

        backups = self.volume_client.backups.list()
        self.assertIsNotNone(backups)
        matching_backups = [b for b in backups
                            if b.volume_id == created_volume_id]
        self.assertEqual(1, len(matching_backups))
        backup = matching_backups[0]
        self.addCleanup(self.volume_client.backups.delete, backup.id)

        # Now, we create another stack where the volume is created from the
        # backup created by the previous stack
        try:
            restore_sid = self.launch_stack(
                template_name='test_volumes_create_from_backup.yaml',
                parameters=parameters,
                add_parameters={'backup_id': backup.id})
            restore_stack = self.client.stacks.get(restore_sid)
        except exceptions.StackBuildErrorException:
            LOG.exception("Halting test due to bug: #1382300")
            return

        # Verify with cinder that the volume exists, with matching details
        restored_volume_id = self._stack_output(restore_stack, 'volume_id')
        self._cinder_verify(restored_volume_id, expected_status='in-use')

        # Verify the stack outputs are as expected
        self._outputs_verify(restore_stack, expected_status='in-use')
        testfile_data = self._stack_output(restore_stack, 'testfile_data')
        self.assertEqual('{"instance1": "Volume Data:ateststring"}',
                         testfile_data)

        # Delete the stack and ensure the volume is gone
        self._stack_delete(restore_sid)
        self.assertRaises(cinder_exceptions.NotFound,
                          self.volume_client.volumes.get,
                          restored_volume_id)

    def test_cinder_volume_create_backup_restore(self):
        """Ensure the 'Snapshot' deletion policy works.

        This requires a more complex test, but it tests several aspects
        of the heat cinder resources:

        1. Create a volume, attach it to an instance, write some data to it
        2. Delete the stack, with 'Snapshot' specified, creates a backup
        3. Check the snapshot has created a volume backup
        4. Create a new stack, where the volume is created from the backup
        5. Verify the test data written in (1) is present in the new volume
        """
        parameters = {
            'key_name': self.keypair_name,
            'instance_type': self.conf.minimal_instance_type,
            'image_id': self.conf.minimal_image_ref,
            'volume_description': self.volume_description,
            'timeout': self.conf.build_timeout,
            'network': self.net['id']
        }

        # Launch stack
        stack_id = self.launch_stack(
            template_name='test_volumes_delete_snapshot.yaml',
            parameters=parameters,
            add_parameters={'volume_size': self.volume_size}
        )

        # Check stack
        self.check_stack(stack_id, parameters)

View File

@ -24,5 +24,4 @@ testtools>=2.2.0 # MIT
testresources>=2.0.0 # Apache-2.0/BSD
reno>=2.5.0 # Apache-2.0
# Next are used in integration tests only
paramiko>=2.0.0 # LGPLv2.1+
tempest>=17.1.0 # Apache-2.0