add old tests to temporary folder
Change-Id: I3acbe0375689f622eac1cccf7b164c7e2112cad7
This commit is contained in:
parent
651ce244cd
commit
5510805a62
86
obsolete/base.py
Normal file
86
obsolete/base.py
Normal file
@ -0,0 +1,86 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import ConfigParser
|
||||
import functools
|
||||
import os
|
||||
|
||||
import testtools
|
||||
|
||||
import tempest.test
|
||||
|
||||
|
||||
def skip(*args, **kwargs):
    """A decorator useful to skip tests with message.

    Usage: ``@skip(bug="123456")`` or ``@skip(msg="reason")``.
    The wrapped test is never executed; it always raises the skip
    exception understood by the testtools runner.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*func_args, **func_kwargs):
            if "bug" in kwargs:
                msg = "Skipped until Bug %s is resolved." % kwargs["bug"]
            else:
                # NOTE: fall back to a generic message instead of raising
                # KeyError when the caller supplied neither "bug" nor "msg"
                # (the original crashed the test instead of skipping it).
                msg = kwargs.get("msg", "Skipped")
            raise testtools.TestCase.skipException(msg)
        return wrapper
    return decorator
|
||||
|
||||
|
||||
class TestCasePreparationError(Exception):
    """Signals that a test case could not prepare its prerequisites.

    Carries a human-readable explanation which is reported verbatim
    when the exception is converted to a string.
    """

    def __init__(self, msg="Error in test case preparation"):
        # Keep the explanation on the instance so __str__ can echo it.
        self.msg = msg

    def __str__(self):
        # Report exactly the stored message, nothing else.
        return self.msg
|
||||
|
||||
|
||||
class BaseTest(tempest.test.BaseTestCase):
    """Base class for Cloudscaling tests"""
    # NOTE(review): intentionally empty -- exists only so every
    # Cloudscaling test shares a common ancestor that can later
    # receive shared behavior without touching each test class.
    pass
|
||||
|
||||
|
||||
class BaseBenchmarkTest(BaseTest):
    """Base class for Cloudscaling benchmark tests.

    Subclasses call ``_load_benchmark_data`` once (typically in
    ``setUpClass``) and then look up per-test reference boundaries with
    ``_get_benchmark_result``.
    """

    @classmethod
    def _load_benchmark_data(cls, class_name):
        """Load reference benchmark boundaries for *class_name*.

        Reads the INI file named by the cloudscaling ``benchmark_data``
        option and stores in ``cls.benchmark_data`` a dict mapping each
        option name to a ``(min, max)`` pair of strings for every value
        written as ``min-max``.  Returns None (and loads nothing) when
        no benchmark data file is configured.
        """
        cfg = cls.config.cloudscaling
        if not cfg.benchmark_data:
            return None

        config = ConfigParser.ConfigParser()
        # NOTE: context manager closes the file even if parsing raises;
        # the original open()/readfp()/close() leaked the handle on error.
        with open(os.path.expanduser(cfg.benchmark_data)) as f:
            config.readfp(f)
        result_items = {}
        for name, value in config.items(class_name):
            boundaries = value.split("-")
            # Only well-formed "min-max" entries are kept.
            if len(boundaries) == 2:
                result_items[name] = (boundaries[0], boundaries[1])

        cls.benchmark_data = result_items

    def _get_benchmark_data(self):
        """Return the dict loaded by ``_load_benchmark_data``."""
        return self.benchmark_data

    def _get_benchmark_result(self, result_name=None):
        """Return the ``(min, max)`` pair for the current test method.

        The lookup key is the lower-cased test method name, optionally
        suffixed with ``.<result_name>``.  Returns None when no
        benchmark data was loaded or no matching entry exists.
        """
        if not hasattr(self, 'benchmark_data'):
            return None

        key = self._testMethodName.lower()
        if result_name is not None:
            key += "." + result_name
        # .get returns None for a missing key, matching the original
        # explicit membership test.
        return self.benchmark_data.get(key)
|
361
obsolete/thirdparty/scenario/aws_compat/base.py
vendored
Normal file
361
obsolete/thirdparty/scenario/aws_compat/base.py
vendored
Normal file
@ -0,0 +1,361 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import fnmatch
|
||||
|
||||
from boto import exception as boto_exception
|
||||
import netaddr
|
||||
|
||||
from tempest import auth
|
||||
from tempest import clients as base_clients
|
||||
from tempest.cloudscaling import base
|
||||
from tempest.cloudscaling.thirdparty.scenario.aws_compat import clients
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest import config
|
||||
from tempest import exceptions
|
||||
from tempest import test as base_test
|
||||
from tempest.thirdparty.boto import test
|
||||
from tempest.thirdparty.boto.utils import wait as boto_wait
|
||||
|
||||
|
||||
VOLUME_SIZE = 1
|
||||
|
||||
|
||||
class BaseAWSTest(base.BaseTest, test.BotoTestCase):
    """Base class for AWS compat Cloudscaling tests"""

    @classmethod
    def setUpClass(cls):
        """Create EC2/VPC clients and read the instance type from config."""
        super(BaseAWSTest, cls).setUpClass()

        cls.os = clients.Manager()
        cls.ec2_client = cls.os.ec2api_client
        cls.vpc_client = cls.os.vpc_client

        cls.config = config.CONF
        cls.instance_type = cls.config.boto.instance_type

    @classmethod
    def _processException(cls, exc):
        """Label EC2 response errors as ERROR with a readable message."""
        if isinstance(exc, boto_exception.EC2ResponseError):
            # NOTE(review): boto versions differ in whether the server text
            # lives in "message" or "error_message"; try both.
            value = getattr(exc, "message", None)
            if not value:
                value = getattr(exc, "error_message", None)
            msg = str(exc.error_code) + ": " + str(value)
            return (base_test.TestResultLabel.ERROR, msg)
        return super(BaseAWSTest, cls)._processException(exc)

    @classmethod
    def _prepare_image_id(cls, image_name):
        """Searches existing available image ID by given name pattern"""

        images = cls.ec2_client.get_all_images(filters={
            "name": image_name,
            "image-type": "machine",
            "is-public": "true"})
        # NOTE(apavlov) There is no filtering in nova-api-ec2. Filter here.
        filtered_images = []
        for image in images:
            if not fnmatch.fnmatch(image.name, image_name):
                continue
            if image.type != "machine":
                continue
            if not image.is_public:
                continue
            filtered_images.append(image)
        if len(filtered_images) > 0:
            return filtered_images[0].id

        # No match: fall back to treating the pattern itself as an image id.
        return image_name

    @classmethod
    def _prepare_key_pair(cls):
        """Key-pair preparation"""

        keypair_name = data_utils.rand_name("keypair-")
        keypair = cls.ec2_client.create_key_pair(keypair_name)
        if keypair is None or keypair.name is None:
            raise base.TestCasePreparationError("Can`t create keypair")
        cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
                               keypair_name)
        return keypair

    @classmethod
    def _prepare_security_group(cls):
        """Security-group preparation"""

        sec_group_name = data_utils.rand_name("securitygroup-")
        group_desc = sec_group_name + " security group description "
        security_group = cls.ec2_client.create_security_group(
            sec_group_name, group_desc)
        if security_group is None or security_group.name is None:
            raise base.TestCasePreparationError("Can't create security group")
        cls.addResourceCleanUp(cls.destroy_security_group_wait,
                               security_group)
        # Open ICMP (ping) from anywhere.
        result = cls.ec2_client.authorize_security_group(
            sec_group_name,
            ip_protocol="icmp",
            cidr_ip="0.0.0.0/0",
            from_port=-1,
            to_port=-1)
        if not result:
            raise base.TestCasePreparationError(
                "Can`t authorize security group")

        # Open SSH (tcp/22) from anywhere.
        result = cls.ec2_client.authorize_security_group(
            sec_group_name,
            ip_protocol="tcp",
            cidr_ip="0.0.0.0/0",
            from_port=22,
            to_port=22)
        if not result:
            raise base.TestCasePreparationError(
                "Can`t authorize security group")
        return security_group

    @classmethod
    def _destroy_security_group_wait(cls, group):
        """Delete a security group, retrying until it is no longer in use."""
        def _delete():
            cls.ec2_client.delete_security_group(group_id=group.id)

        boto_wait.wait_no_exception(_delete)

    @classmethod
    def _destroy_internet_gateway(cls, internet_gateway):
        """Detach an internet gateway from all its VPCs, then delete it."""
        igs = cls.vpc_client.get_all_internet_gateways(
            internet_gateway_ids=[internet_gateway.id])
        if len(igs) == 0:
            # Already gone -- nothing to clean up.
            return
        ig = igs[0]
        for attachment in ig.attachments:
            cls.vpc_client.detach_internet_gateway(ig.id, attachment.vpc_id)
        cls.vpc_client.delete_internet_gateway(ig.id)

    @classmethod
    def _delete_subnet_wait(cls, subnet):
        """Delete a subnet, retrying until its ports are released."""
        def _delete():
            cls.vpc_client.delete_subnet(subnet.id)

        boto_wait.wait_no_exception(_delete)

    @classmethod
    def _prepare_public_ip(cls, instance, network_interface_id=None):
        """Public IP preparation"""

        ip_address = instance.ip_address

        # Allocate and associate an address only when the instance has no
        # public IP yet (or reports its private IP as the public one).
        if ip_address is None or ip_address == instance.private_ip_address:
            domain = "vpc" if instance.vpc_id is not None else None
            address = cls.ec2_client.allocate_address(domain)
            if address is None or not address.public_ip:
                raise base.TestCasePreparationError(
                    "Can't allocate public IP")
            if domain is None:
                # NOTE(ft): this is temporary workaround for OS
                # it must be removed after VPC integration
                cls.addResourceCleanUp(address.delete)
                status = address.associate(instance.id)
                if not status:
                    raise base.TestCasePreparationError(
                        "Can't associate IP with instance")
                cls.addResourceCleanUp(address.disassociate)
            else:
                cls.addResourceCleanUp(cls.ec2_client.release_address,
                                       allocation_id=address.allocation_id)
                if network_interface_id:
                    status = cls.ec2_client.associate_address(
                        allocation_id=address.allocation_id,
                        network_interface_id=network_interface_id)
                else:
                    status = cls.ec2_client.associate_address(
                        instance.id, allocation_id=address.allocation_id)
                if not status:
                    raise base.TestCasePreparationError(
                        "Can't associate IP with instance")
                # Re-fetch the address to learn its association id so the
                # disassociation can be registered for cleanup.
                addresses = cls.ec2_client.get_all_addresses(
                    allocation_ids=[address.allocation_id])
                if addresses is None or len(addresses) != 1:
                    raise base.TestCasePreparationError(
                        "Can't get address by allocation_id")
                address = addresses[0]
                cls.addResourceCleanUp(cls.ec2_client.disassociate_address,
                                       association_id=address.association_id)
            instance.update()
            ip_address = address.public_ip

        return ip_address

    @classmethod
    def _wait_instance_state(cls, instance, final_set):
        """Wait until *instance* reaches one of the states in *final_set*."""
        if not isinstance(final_set, set):
            final_set = set((final_set,))
        # Also accept "gone" states so a vanished instance ends the wait.
        final_set |= cls.gone_set
        lfunction = cls.get_lfunction_gone(instance)
        state = boto_wait.state_wait(lfunction, final_set,
                                     cls.valid_instance_state)
        if state not in final_set:
            raise base.TestCasePreparationError("Error in waiting for "
                                                "instance(state = '%s')"
                                                % state)

    @classmethod
    def _correct_ns_if_needed(cls, ssh):
        """Verify DNS works on the instance; point it at 8.8.8.8 if not."""
        try:
            ssh.exec_command("host www.com")
        except exceptions.SSHExecCommandFailed:
            # NOTE(apavlov) update nameservers (mandatory for local devstack)
            ssh.exec_command("sudo su -c 'echo nameserver 8.8.8.8 "
                             "> /etc/resolv.conf'")
            ssh.exec_command("host www.com")

    @classmethod
    def _prepare_ebs_image(cls):
        """Return an EBS-backed image id, building one via nova/cinder if
        none is configured.  Skips the test when prerequisites are missing.
        """
        if cls.config.cloudscaling.ebs_image_id:
            return cls.config.cloudscaling.ebs_image_id

        if not cls.config.cloudscaling.image_id_ami:
            raise cls.skipException("".join(("EC2 ", cls.__name__,
                                             ": requires image_id_ami "
                                             "setting")))

        if not cls.config.service_available.cinder:
            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
            raise cls.skipException(skip_msg)
        if not cls.config.service_available.nova:
            skip_msg = ("%s skipped as nova is not available" % cls.__name__)
            raise cls.skipException(skip_msg)

        # Admin credentials are required to drive nova/cinder directly.
        admin_creds = auth.get_default_credentials('compute_admin')
        os = base_clients.Manager(admin_creds, interface='json')
        cls.os = os
        cls.volumes_client = os.volumes_client
        cls.servers_client = os.servers_client
        cls.images_client = os.images_client
        cls.snapshots_client = os.snapshots_client

        # NOTE(apavlov): create volume
        resp, volume = cls.volumes_client.create_volume(
            VOLUME_SIZE, display_name="aws_volume")
        assert 200 == resp.status
        cls.addResourceCleanUp(cls._delete_volume, volume['id'])
        cls.volumes_client.wait_for_volume_status(volume['id'], 'available')

        # NOTE(apavlov): boot instance
        bdm = [{
            "volume_id": volume['id'],
            "delete_on_termination": "1",
            "device_name": "/dev/vda"}]
        resp, server = cls.servers_client.create_server(
            "aws_instance",
            cls.config.cloudscaling.image_id_ami,
            cls.config.compute.flavor_ref,
            block_device_mapping=bdm)
        assert 202 == resp.status
        rc_server = cls.addResourceCleanUp(cls.servers_client.delete_server,
                                           server['id'])
        cls.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
        # NOTE(apavlov): create image from instance
        image_name = data_utils.rand_name("aws_ebs_image-")
        resp, _ = cls.images_client.create_image(server['id'],
                                                 image_name)
        assert 202 == resp.status
        cls.image_id = resp["location"].split('/')[-1]
        cls.addResourceCleanUp(cls.images_client.delete_image,
                               cls.image_id)
        # NOTE(apavlov): delete instance
        cls.cancelResourceCleanUp(rc_server)
        cls.servers_client.delete_server(server['id'])
        cls.servers_client.wait_for_server_termination(server['id'])

        # Find the EC2-visible id of the image that was just created.
        images = cls.ec2_client.get_all_images()
        for image in images:
            if image_name in image.location:
                return image.id

        raise base.TestCasePreparationError("Can't find ebs image.")

    @classmethod
    def _delete_volume(cls, volume_id):
        """Delete a volume together with any snapshots created from it."""
        resp, result = cls.snapshots_client.list_snapshots(
            {"volume_id": volume_id})
        if 200 == resp.status:
            for snapshot in result:
                cls.snapshots_client.delete_snapshot(snapshot['id'])
                cls.snapshots_client.wait_for_resource_deletion(snapshot['id'])
        cls.volumes_client.delete_volume(volume_id)
|
||||
|
||||
|
||||
class BaseVPCTest(BaseAWSTest):
    """Base class for AWS VPC behavior tests."""

    @classmethod
    @base_test.safe_setup
    def setUpClass(cls):
        """Read VPC config and prepare the shared image id and keypair."""
        super(BaseVPCTest, cls).setUpClass()
        cls.zone = cls.config.boto.aws_zone
        cfg = cls.config.cloudscaling
        cls.ssh_user = cfg.general_ssh_user_name
        cls.vpc_cidr = netaddr.IPNetwork(cfg.vpc_cidr)
        # Take the first /<vpc_subnet_prefix> subnet of the VPC range.
        (cls.subnet_cidr,) = cls.vpc_cidr.subnet(cfg.vpc_subnet_prefix, 1)
        cls.image_id = cls._prepare_image_id(cfg.general_image_name)
        cls.keypair = cls._prepare_key_pair()

    @classmethod
    def _tune_vpc(cls, vpc):
        """Give *vpc* internet access: gateway, default route, open group."""
        ig = cls.vpc_client.create_internet_gateway()
        if ig is None or not ig.id:
            raise base.TestCasePreparationError()
        cls.addResourceCleanUp(cls._destroy_internet_gateway, ig)
        status = cls.vpc_client.attach_internet_gateway(ig.id, vpc.id)
        if not status:
            raise base.TestCasePreparationError()
        rtables = cls.vpc_client.get_all_route_tables(
            filters=[("vpc-id", vpc.id)])
        if rtables is None or len(rtables) != 1:
            raise base.TestCasePreparationError()
        # Route everything through the new internet gateway.
        status = cls.vpc_client.create_route(rtables[0].id, "0.0.0.0/0",
                                             gateway_id=ig.id)
        if not status:
            raise base.TestCasePreparationError()
        secgroups = cls.vpc_client.get_all_security_groups(
            filters={"vpc-id": vpc.id})
        if secgroups is None or len(secgroups) != 1:
            raise base.TestCasePreparationError()
        # Open all traffic in the VPC's default security group.
        status = cls.vpc_client.authorize_security_group(
            group_id=secgroups[0].id, ip_protocol="-1",
            from_port=-1, to_port=-1, cidr_ip="0.0.0.0/0")
        if not status:
            raise base.TestCasePreparationError()

    @classmethod
    def _prepare_vpc(cls, vpc_cidr, sn_cidr):
        """Create a VPC with DHCP options and one subnet; return the subnet."""
        # NOTE(Alex) The following code is introduced for OpenStack
        # and potentially requires fix in boto. See details in
        # test_vpc_nat_scenario.
        dhcp_opts = cls.vpc_client.create_dhcp_options(
            domain_name_servers=['8.8.8.8'])
        if dhcp_opts is None or not dhcp_opts.id:
            raise base.TestCasePreparationError()
        cls.addResourceCleanUp(cls.vpc_client.delete_dhcp_options,
                               dhcp_opts.id)
        vpc = cls.vpc_client.create_vpc(str(vpc_cidr))
        if vpc is None or not vpc.id:
            raise base.TestCasePreparationError()
        cls.addResourceCleanUp(cls.vpc_client.delete_vpc, vpc.id)
        if not cls.vpc_client.associate_dhcp_options(dhcp_opts.id, vpc.id):
            raise base.TestCasePreparationError()
        cls._tune_vpc(vpc)
        sn = cls.vpc_client.create_subnet(vpc.id, str(sn_cidr), cls.zone)
        if sn is None or not sn.id:
            raise base.TestCasePreparationError()
        cls.addResourceCleanUp(cls._delete_subnet_wait, sn)
        return sn
|
225
obsolete/thirdparty/scenario/aws_compat/test_ec2_instance_mysql.py
vendored
Normal file
225
obsolete/thirdparty/scenario/aws_compat/test_ec2_instance_mysql.py
vendored
Normal file
@ -0,0 +1,225 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
|
||||
import tempest.cloudscaling.utils as utils
|
||||
from tempest.common.utils.linux import remote_client
|
||||
from tempest import test
|
||||
from tempest.thirdparty.boto.utils import wait as boto_wait
|
||||
|
||||
import logging
|
||||
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class InstanceMySQLTest(aws_base.BaseAWSTest):
    """
    Test 'Running MySQL on Amazon' (http://aws.amazon.com/articles/1663)
    """
    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        """Prepare the image, keypair and security group both scenarios use."""
        super(InstanceMySQLTest, cls).setUpClass()

        cfg = cls.config.cloudscaling
        image_name = cfg.mysql_image_name
        cls.ssh_user = cfg.mysql_ssh_user_name
        cls.volume_attach_name = "sdh"

        cls.image_id = cls._prepare_image_id(image_name)

        cls.keypair = cls._prepare_key_pair()
        sg = cls._prepare_security_group()
        cls.sec_group_name = sg.name

    def test_integration_mysql(self):
        """Test based on http://aws.amazon.com/articles/1663"""

        # First pass: install MySQL onto a fresh volume and snapshot it.
        snapshot = self._run_scenario(self._create_mysql_db)

        # Second pass: restore the database from that snapshot.
        self._run_scenario(self._restore_mysql_db, snapshot=snapshot)

    def _run_scenario(self, scenario_func, snapshot=None):
        """Boot an instance, attach a volume (optionally from *snapshot*),
        run *scenario_func*(ssh, volume_id) on it, then stop the instance.
        Returns whatever the scenario function returns.
        """
        # NOTE(apavlov): ec2-run-instances --key KEYPAIR IMAGE
        reservation = self.ec2_client.run_instances(
            self.image_id,
            instance_type=self.instance_type,
            key_name=self.keypair.name,
            security_groups=(self.sec_group_name,))
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        instance = reservation.instances[0]
        LOG.info("state: %s", instance.state)
        # NOTE(apavlov): wait until it runs (ec2-describe-instances INSTANCE)
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")

        # NOTE(apavlov): ec2-create-volume -z ZONE -s SIZE_GB
        zone = instance.placement
        volume = self.ec2_client.create_volume(1, zone, snapshot=snapshot)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        # NOTE(apavlov): wait it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(volume, "available")

        ip_address = self._prepare_public_ip(instance)
        ssh = remote_client.RemoteClient(ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)

        # NOTE(apavlov): ec2-attach-volume -d /dev/XXX -i INSTANCE VOLUME
        # and wait until it will be available
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/" + self.volume_attach_name)

        def _volume_state():
            volume.update(validate=True)
            return volume.status

        self.assertVolumeStatusWait(_volume_state, "in-use")
        boto_wait.re_search_wait(_volume_state, "in-use")

        def _part_state():
            # Compare partition-list line counts to detect the new device.
            current = ssh.get_partitions().split('\n')
            if len(current) > len(part_lines):
                return 1
            if len(current) < len(part_lines):
                return -1
            return 0

        boto_wait.state_wait(_part_state, 1)
        part_lines_new = ssh.get_partitions().split('\n')
        self.volume_name = utils.detect_new_volume(part_lines, part_lines_new)
        part_lines = part_lines_new

        self._correct_ns_if_needed(ssh)

        snapshot = scenario_func(ssh, volume.id)

        # NOTE(apavlov): stop this instance(imagine that it will be used)
        instance.stop()
        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")

        return snapshot

    def _create_mysql_db(self, ssh, volume_id):
        """Install MySQL with its data on the attached EBS volume, create a
        sample database, and return a consistent snapshot of the volume.
        """
        ssh.exec_command("sudo apt-get update && sudo apt-get upgrade -fy")

        # install mysql
        ssh.exec_command("echo mysql-server-5.1 mysql-server/"
                         "root_password password rootpass | sudo debconf-set-selections"
                         "&& echo mysql-server-5.1 mysql-server/"
                         "root_password_again password rootpass "
                         "| sudo debconf-set-selections"
                         "&& echo mysql-server-5.1 mysql-server/"
                         "start_on_boot boolean true | sudo debconf-set-selections")
        ssh.exec_command("sudo apt-get install -y xfsprogs mysql-server")

        # Format the new volume as XFS and mount it at /vol.
        ssh.exec_command("grep -q xfs /proc/filesystems || sudo modprobe xfs")
        ssh.exec_command("sudo mkfs.xfs /dev/" + self.volume_name)
        ssh.exec_command("echo '/dev/" + self.volume_name
                         + " /vol xfs noatime 0 0' "
                         "| sudo tee -a /etc/fstab")
        ssh.exec_command("sudo mkdir -m 000 /vol && sudo mount /vol")

        # NOTE(apavlov): Move the existing database files to the EBS volume.
        ssh.exec_command("sudo /etc/init.d/mysql stop"
                         "&& sudo mkdir /vol/etc /vol/lib /vol/log"
                         "&& sudo mv /etc/mysql /vol/etc/"
                         "&& sudo mv /var/lib/mysql /vol/lib/"
                         "&& sudo mv /var/log/mysql /vol/log/")

        ssh.exec_command("sudo mkdir /etc/mysql"
                         "&& sudo mkdir /var/lib/mysql"
                         "&& sudo mkdir /var/log/mysql")

        # Bind-mount the volume's MySQL directories back into place.
        ssh.exec_command("echo '/vol/etc/mysql /etc/mysql none bind' "
                         "| sudo tee -a /etc/fstab"
                         "&& sudo mount /etc/mysql")

        ssh.exec_command("echo '/vol/lib/mysql /var/lib/mysql none bind' "
                         "| sudo tee -a /etc/fstab"
                         "&& sudo mount /var/lib/mysql")

        ssh.exec_command("echo '/vol/log/mysql /var/log/mysql none bind' "
                         "| sudo tee -a /etc/fstab"
                         "&& sudo mount /var/log/mysql")
        ssh.exec_command("sudo /etc/init.d/mysql start")

        # NOTE(apavlov): add test DB
        ssh.exec_command("mysql -u root --password=rootpass -e "
                         "'CREATE DATABASE tutorial_sample'")

        resp = ssh.exec_command("mysql -u root --password=rootpass "
                                "-e 'SHOW DATABASES'")
        self.assertIn("tutorial_sample", resp)

        # NOTE(apavlov): make snapshot
        # Lock tables and freeze the filesystem so the snapshot is consistent.
        ssh.exec_command("mysql -u root --password=rootpass -e '"
                         "FLUSH TABLES WITH READ LOCK;"
                         "SHOW MASTER STATUS;"
                         "SYSTEM sudo xfs_freeze -f /vol;'")

        snapshot = self.ec2_client.create_snapshot(volume_id)
        self.addResourceCleanUp(self.destroy_snapshot_wait, snapshot)
        self.assertSnapshotStatusWait(snapshot, "completed")

        ssh.exec_command("mysql -u root --password=rootpass -e '"
                         "SYSTEM sudo xfs_freeze -u /vol;"
                         "UNLOCK TABLES;'")

        # NOTE(apavlov): cleanup
        ssh.exec_command("sudo /etc/init.d/mysql stop"
                         "&& sudo umount /etc/mysql /var/lib/mysql /var/log/mysql /vol")

        return snapshot

    def _restore_mysql_db(self, ssh, volume_id):
        """Mount a volume restored from the snapshot and verify the sample
        database is present after starting MySQL from it.  Returns None.
        """
        ssh.exec_command("sudo apt-get update")
        ssh.exec_command("sudo apt-get upgrade -y")

        # install mysql
        ssh.exec_command("export DEBIAN_FRONTEND=noninteractive")
        ssh.exec_command("sudo -E apt-get install -y xfsprogs mysql-server")

        ssh.exec_command("echo '/dev/" + self.volume_name
                         + " /vol xfs noatime 0 0' "
                         "| sudo tee -a /etc/fstab")
        ssh.exec_command("sudo mkdir -m 000 /vol")
        ssh.exec_command("sudo mount /vol")

        # Restore mysql-user ownership on the restored data files.
        ssh.exec_command("sudo find /vol/{lib,log}/mysql/ ! -user root -print0"
                         " | sudo xargs -0 -r chown mysql")
        ssh.exec_command("sudo find /vol/{lib,log}/mysql/ ! -group root -a !"
                         " -group adm -print0 | sudo xargs -0 -r chgrp mysql")
        ssh.exec_command("sudo /etc/init.d/mysql stop")
        ssh.exec_command("echo '/vol/etc/mysql /etc/mysql none bind' "
                         "| sudo tee -a /etc/fstab")
        ssh.exec_command("sudo mount /etc/mysql")
        ssh.exec_command("echo '/vol/lib/mysql /var/lib/mysql none bind' "
                         "| sudo tee -a /etc/fstab")
        ssh.exec_command("sudo mount /var/lib/mysql")
        ssh.exec_command("echo '/vol/log/mysql /var/log/mysql none bind' "
                         "| sudo tee -a /etc/fstab")
        ssh.exec_command("sudo mount /var/log/mysql")
        ssh.exec_command("sudo /etc/init.d/mysql start")

        resp = ssh.exec_command("mysql -u root --password=rootpass "
                                "-e 'SHOW DATABASES'")
        self.assertIn("tutorial_sample", resp)

        # NOTE(apavlov): cleanup
        ssh.exec_command("sudo /etc/init.d/mysql stop"
                         "&& sudo umount /etc/mysql /var/lib/mysql /var/log/mysql /vol")

        return None
|
110
obsolete/thirdparty/scenario/aws_compat/test_ec2_unixbench.py
vendored
Normal file
110
obsolete/thirdparty/scenario/aws_compat/test_ec2_unixbench.py
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from testtools import content as test_content
|
||||
|
||||
import tempest.cloudscaling.base as base
|
||||
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
|
||||
from tempest.common.utils.linux import remote_client
|
||||
from tempest import test
|
||||
|
||||
import logging
|
||||
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class UnixBenchTest(base.BaseBenchmarkTest, aws_base.BaseAWSTest):
    """UnixBench set of tests used to test performance compatibility to AWS"""

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        """Boot an instance, attach a public IP and open an SSH session."""
        super(UnixBenchTest, cls).setUpClass()

        cls._load_benchmark_data("UnixBenchTest")

        cfg = cls.config.cloudscaling
        image_name = cfg.general_image_name
        cls.ssh_user = cfg.general_ssh_user_name

        cls.image_id = cls._prepare_image_id(image_name)

        cls.keypair = cls._prepare_key_pair()
        sg = cls._prepare_security_group()
        cls.sec_group_name = sg.name
        # NOTE(apavlov): ec2-run-instances --key KEYPAIR IMAGE
        reservation = cls.ec2_client.run_instances(
            cls.image_id,
            instance_type=cls.instance_type,
            key_name=cls.keypair.name,
            security_groups=(cls.sec_group_name,))
        cls.addResourceCleanUp(cls.destroy_reservation, reservation)
        cls.instance = reservation.instances[0]
        LOG.info("state: %s", cls.instance.state)
        # NOTE(apavlov): wait until it runs (ec2-describe-instances INSTANCE)
        cls._wait_instance_state(cls.instance, "running")

        # NOTE: the original called _prepare_public_ip twice in a row;
        # a single call is sufficient and avoids a redundant allocation.
        ip_address = cls._prepare_public_ip(cls.instance)
        cls.ssh = remote_client.RemoteClient(ip_address,
                                             cls.ssh_user,
                                             pkey=cls.keypair.material)

    @test.attr(type='benchmark')
    def test_run_benchmark(self):
        """Run UnixBench test on prepared instance"""
        if self.ssh is None:
            raise self.skipException("Booting failed")
        ssh = self.ssh

        self._correct_ns_if_needed(ssh)

        ssh.exec_command("sudo apt-get update && sudo apt-get upgrade -fy")
        ssh.exec_command("sudo apt-get update")
        ssh.exec_command("sudo apt-get install -y make gcc")
        ssh.exec_command("sudo apt-get install -y libx11-dev libgl1-mesa-dev "
                         "libxext-dev perl perl-modules")
        ssh.exec_command("wget http://byte-unixbench.googlecode.com/files"
                         "/UnixBench5.1.3.tgz")
        ssh.exec_command("tar xvf UnixBench5.1.3.tgz")
        resp = ssh.exec_command("cd UnixBench && ./Run")

        # Keep only the results table of the UnixBench output.
        i = resp.find("---------------")
        if i != -1:
            resp = resp[i:]
        resp = "zone: " + self.instance.placement + "\n" + resp

        fail = None
        reference = self._get_benchmark_data()
        for k, v in reference.iteritems():
            i1 = resp.lower().find(k)
            if i1 == -1:
                continue

            # Recover the metric name in its original casing and grab the
            # first two whitespace-separated tokens after it (value, units).
            k = resp[i1:i1 + len(k)]
            i2 = resp.find("\n", i1)
            outp = resp[i1 + len(k):i2].split()[:2]
            if len(outp) < 2:
                continue

            self.addDetail(k, test_content.text_content(
                outp[1] + "|" + outp[0] + "|Min: " + v[0] + "|Max: " + v[1]))

            # Remember the first metric that falls below the AWS minimum.
            if fail is None and float(outp[0]) < float(v[0]):
                fail = (outp[0], outp[1], k, v[0])

        if fail is not None:
            # NOTE: compare the measured value against the AWS reference
            # minimum numerically; the original compared the value string
            # with the units string, which never reflected the real check.
            self.assertGreaterEqual(
                float(fail[0]), float(fail[3]),
                fail[2] + ": " +
                fail[0] + " " + fail[1] + " (current) < " +
                fail[3] + " " + fail[1] + " (AWS)")
|
250
obsolete/thirdparty/scenario/aws_compat/test_ec2_volume_benchmark.py
vendored
Normal file
250
obsolete/thirdparty/scenario/aws_compat/test_ec2_volume_benchmark.py
vendored
Normal file
@ -0,0 +1,250 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import time
|
||||
|
||||
from testtools import content as test_content
|
||||
|
||||
import tempest.cloudscaling.base as base
|
||||
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
|
||||
import tempest.cloudscaling.utils as utils
|
||||
from tempest.common.utils.linux import remote_client
|
||||
from tempest import test
|
||||
from tempest.thirdparty.boto.utils import wait as boto_wait
|
||||
|
||||
import logging
|
||||
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VolumeBenchmarkTest(base.BaseBenchmarkTest, aws_base.BaseAWSTest):
    """Benchmark EBS volume operations against recorded AWS timings.

    The test_00x methods run in order and share state through ``cls.ctx``:
    attach a volume, fill it with data, snapshot it, clone it, detach it.
    Each step is timed and compared with reference data loaded by
    _load_benchmark_data.
    """

    class Context:
        # Mutable state handed from one ordered test method to the next.
        instance = None        # booted EC2 instance under test
        ssh = None             # RemoteClient session to that instance
        volume = None          # volume created in test_001
        part_lines = None      # last seen partition listing (list of lines)
        volume_ready = False   # volume formatted and mounted at /vol
        volume_filled = False  # /vol/data populated with random files
        snapshot = None        # snapshot created in test_003

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        """Boot one instance and open the SSH session used by every test."""
        super(VolumeBenchmarkTest, cls).setUpClass()

        cls._load_benchmark_data("VolumeBenchmarkTest")

        cfg = cls.config.cloudscaling
        image_name = cfg.general_image_name
        cls.ssh_user = cfg.general_ssh_user_name
        cls.volume_size = cfg.volume_benchmark_volume_size_gb
        cls.volume_fill = cfg.volume_benchmark_volume_fill_percent
        cls.volume_attach_name = "sdh"
        cls.ctx = cls.Context()

        cls.image_id = cls._prepare_image_id(image_name)

        cls.keypair = cls._prepare_key_pair()
        secgroup = cls._prepare_security_group()
        cls.sec_group_name = secgroup.name

        # NOTE(apavlov): ec2-run-instances --key KEYPAIR IMAGE
        reservation = cls.ec2_client.run_instances(
            cls.image_id,
            instance_type=cls.instance_type,
            key_name=cls.keypair.name,
            security_groups=(cls.sec_group_name,))
        cls.addResourceCleanUp(cls.destroy_reservation, reservation)
        instance = reservation.instances[0]
        LOG.info("state: %s", instance.state)
        # NOTE(apavlov): wait until it runs (ec2-describe-instances INSTANCE)
        cls._wait_instance_state(instance, "running")
        cls.ctx.instance = instance

        ip_address = cls._prepare_public_ip(instance)
        cls.ctx.ssh = remote_client.RemoteClient(ip_address,
                                                 cls.ssh_user,
                                                 pkey=cls.keypair.material)

    def _volume_state(self):
        # Refresh the volume object and report its status; used as a
        # callable predicate by the boto state waiters.
        self.ctx.volume.update(validate=True)
        return self.ctx.volume.status

    def _part_state(self):
        # Compare the instance's current partition listing with the stored
        # one: 1 = a partition appeared, -1 = one disappeared, 0 = no change.
        current = self.ctx.ssh.get_partitions().split('\n')
        if len(current) > len(self.ctx.part_lines):
            return 1
        if len(current) < len(self.ctx.part_lines):
            return -1
        return 0

    def _start_test(self):
        # Open a measured time interval.
        self.start_time = time.time()

    def _end_test(self, detail_description):
        # Close the measured interval and attach both the measured time and
        # the AWS reference range (when available) as test details.
        self.test_time = time.time() - self.start_time
        content = test_content.text_content(
            detail_description + " time: " + str(self.test_time) + "s")
        self.addDetail("Current", content)
        reference_time = self._get_benchmark_result()
        if reference_time is not None:
            content = test_content.text_content(
                "Min time: " + str(reference_time[0]) + "s, " +
                "Max time: " + str(reference_time[1]) + "s")
            self.addDetail("AWS", content)

    def _check_test(self):
        # Fail the test if the measured time exceeds the AWS maximum.
        reference_time = self._get_benchmark_result()
        if reference_time is not None:
            self.assertLessEqual(self.test_time, float(reference_time[1]),
                                 str(self.test_time) + "s (current) > " +
                                 reference_time[1] + "s (AWS)")

    @test.attr(type='benchmark')
    def test_001_attach_volume(self):
        """Attach volume"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")

        self._start_test()

        # NOTE(apavlov): ec2-create-volume -z ZONE -s SIZE_GB
        zone = self.ctx.instance.placement
        volume = self.ec2_client.create_volume(self.volume_size, zone)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        self.ctx.volume = volume
        # NOTE(apavlov): wait it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(volume, "available")

        # NOTE(apavlov): ec2-attach-volume -d /dev/XXX -i INSTANCE VOLUME
        # and wait until it will be available
        self.ctx.part_lines = self.ctx.ssh.get_partitions().split('\n')
        volume.attach(self.ctx.instance.id, "/dev/" + self.volume_attach_name)

        # NOTE(apavlov): "attaching" invalid EC2 status #1074901
        self.assertVolumeStatusWait(self._volume_state, "in-use")
        boto_wait.re_search_wait(self._volume_state, "in-use")

        # Wait for the new block device to show up in the guest, then work
        # out its device name by diffing the partition listings.
        boto_wait.state_wait(self._part_state, 1)
        refreshed_parts = self.ctx.ssh.get_partitions().split('\n')
        volume_name = utils.detect_new_volume(self.ctx.part_lines,
                                              refreshed_parts)
        self.ctx.part_lines = refreshed_parts

        self._end_test("Create and attach volume")

        self.ctx.ssh.exec_command("PATH=$PATH:/usr/sbin:/usr/bin "
                                  "&& sudo mkfs.ext3 /dev/" + volume_name)
        self.ctx.ssh.exec_command("sudo mkdir -m 777 /vol "
                                  "&& sudo mount /dev/" + volume_name +
                                  " /vol")
        self.ctx.volume_ready = True

        self._check_test()

    @test.attr(type='benchmark')
    def test_002_fill_volume(self):
        """Fill volume with data"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if not self.ctx.volume_ready:
            raise self.skipException("Volume preparation failed")

        self._start_test()

        self.ctx.ssh.exec_command("sudo mkdir -m 777 /vol/data")
        # ~1 KB per line; scale the per-file line count with volume size.
        file_lines = 102 * int(self.volume_size)
        for index in xrange(int(self.volume_fill)):
            self.ctx.ssh.exec_command("cat /dev/urandom "
                                      "| tr -d -c 'a-zA-Z0-9' "
                                      "| fold -w 1020 "
                                      "| head -n " + str(file_lines) +
                                      " > /vol/data/file" + str(index))

        self._end_test("Volume filling")

        self.ctx.volume_filled = True

        self._check_test()

    @test.attr(type='benchmark')
    def test_003_snapshot_volume(self):
        """Snapshot volume"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if not self.ctx.volume_filled:
            raise self.skipException("Volume filling failed")

        self._start_test()

        snapshot = self.ec2_client.create_snapshot(self.ctx.volume.id)
        self.addResourceCleanUp(self.destroy_snapshot_wait, snapshot)
        self.assertSnapshotStatusWait(snapshot, "completed")

        self._end_test("Snapshot creation")

        self.ctx.snapshot = snapshot

        self._check_test()

    @test.attr(type='benchmark')
    def test_004_clone_volume_snapshot(self):
        """Clone volume"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if self.ctx.snapshot is None:
            raise self.skipException("Snapshot of volume failed")

        self._start_test()

        zone = self.ctx.instance.placement
        volume2 = self.ec2_client.create_volume(
            self.volume_size, zone, snapshot=self.ctx.snapshot)
        self.addResourceCleanUp(self.destroy_volume_wait, volume2)
        # NOTE(apavlov): wait it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(volume2, "available")

        self._end_test("Volume creation by snapshot")

        self._check_test()

    @test.attr(type='benchmark')
    def test_005_detach_volume(self):
        """Detach volume"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if not self.ctx.volume_ready:
            raise self.skipException("Volume preparation failed")

        self._start_test()

        self.ctx.ssh.exec_command("sudo umount /vol")

        self.ctx.volume.detach()

        # NOTE(apavlov): "detaching" invalid EC2 status #1074901
        self.assertVolumeStatusWait(self._volume_state, "available")
        boto_wait.re_search_wait(self._volume_state, "available")

        self._end_test("Detach volume")

        # The block device must also disappear from the guest.
        boto_wait.state_wait(self._part_state, -1)

        self._check_test()
|
297
obsolete/thirdparty/scenario/aws_compat/test_vpc_behavior.py
vendored
Normal file
297
obsolete/thirdparty/scenario/aws_compat/test_vpc_behavior.py
vendored
Normal file
@ -0,0 +1,297 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import threading
|
||||
|
||||
import boto.exception
|
||||
import netaddr
|
||||
|
||||
from tempest.cloudscaling import base
|
||||
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
|
||||
from tempest.common.utils.linux import remote_client
|
||||
from tempest import test
|
||||
from tempest.thirdparty.boto.utils import wait as boto_wait
|
||||
|
||||
import logging
|
||||
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
||||
|
||||
|
||||
class VPC_Behavior_Base(aws_base.BaseVPCTest):
    """Base class for AWS VPC behavior tests."""

    @classmethod
    def _run_instance(cls, subnet, private_ip=None):
        """Launch a single instance in *subnet* and return it.

        :param subnet: subnet object the instance is placed into
        :param private_ip: optional fixed private address for the instance
        :raises base.TestCasePreparationError: when the reservation is
            missing or does not contain exactly one instance
        """
        params = {
            "key_name": cls.keypair.name,
            "instance_type": cls.instance_type,
            "placement": cls.zone,
            "subnet_id": subnet.id,
        }
        if private_ip:
            params["private_ip_address"] = str(private_ip)
        reservation = cls.vpc_client.run_instances(cls.image_id, **params)
        if reservation is None:
            raise base.TestCasePreparationError()
        cls.addResourceCleanUp(cls.destroy_reservation, reservation)
        if len(reservation.instances) != 1:
            raise base.TestCasePreparationError()
        return reservation.instances[0]
|
||||
|
||||
|
||||
class VPC_Behavior(VPC_Behavior_Base):
    """Test various behavior of VPC network."""

    class TcpDumpRunner(object):
        """Context manager that runs tcpdump on a remote instance.

        tcpdump is started in a background thread; on exit (or stop()) it
        is interrupted with SIGINT and its captured log can be fetched
        with get_result().
        """

        # SSH channel timeout; assigned from config in setUpClass before
        # any instance of this class is created.
        timeout = None

        def __init__(self, instance, ssh_user, ssh_keypair, parameters):
            ssh = remote_client.RemoteClient(instance.ip_address,
                                             ssh_user,
                                             pkey=ssh_keypair.material)
            ssh.ssh_client.channel_timeout = float(self.timeout)
            self.ssh = ssh
            self.parameters = parameters
            self.thread = None

        def __enter__(self):
            self.ssh.exec_command("rm -f tcpdump.log")
            thread = threading.Thread(target=self._run_tcpdump)
            thread.start()
            # Wait until tcpdump has actually started before letting the
            # test generate traffic.
            self._sync()
            self.thread = thread
            return self

        def __exit__(self, ex_type, ex_value, ex_traceback):
            self.stop()

        def _run_tcpdump(self):
            # Blocking call; runs in the background thread until tcpdump
            # is interrupted by stop().
            self.ssh.exec_command("sudo tcpdump %s >tcpdump.log 2>&1" %
                                  self.parameters)

        def _sync(self):
            # tcpdump creates its log file as soon as it is running.
            def check_tcpdump_is_ready():
                resp = self.ssh.exec_command("test -f tcpdump.log && echo 1 "
                                             "|| echo 0")
                return int(resp) == 1
            boto_wait.state_wait(check_tcpdump_is_ready, True)

        def stop(self):
            """Interrupt tcpdump; return True if its thread finished."""
            if self.thread is None:
                return
            self.ssh.exec_command("sudo pkill -SIGINT tcpdump")
            thread = self.thread
            self.thread = None
            thread.join(float(self.timeout))
            return not thread.is_alive()

        def get_result(self):
            """Return the text tcpdump wrote to its log file."""
            resp = self.ssh.exec_command("cat tcpdump.log")
            return resp

    class Context(object):
        # State shared between the ordered test methods.
        instance3 = None   # extra instance booted in test_012
        lease_file = None  # dhclient lease file found in test_013
        gateway = None     # subnet gateway address found in test_011

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        super(VPC_Behavior, cls).setUpClass()
        cls.TcpDumpRunner.timeout = cls.config.boto.build_timeout
        cls.subnet = cls._prepare_vpc(cls.vpc_cidr, cls.subnet_cidr)
        cls.instance1 = cls._run_instance(cls.subnet)
        cls.instance2 = cls._run_instance(cls.subnet)
        cls._wait_instance_state(cls.instance1, "running")
        cls._wait_instance_state(cls.instance2, "running")
        cls.instance1.ip_address = cls._prepare_public_ip(cls.instance1)
        ssh = remote_client.RemoteClient(cls.instance1.ip_address,
                                         cls.ssh_user,
                                         pkey=cls.keypair.material)
        ssh.exec_command("sudo apt-get update")
        ssh.exec_command("sudo DEBIAN_FRONTEND=noninteractive apt-get -fqy "
                         "install socat nmap")
        cls.ctx = cls.Context()

    def test_011_check_network_gateway(self):
        """Is gateway local to subnet?"""
        ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        resp = ssh.exec_command("route -n | awk '{ if ($1==\"0.0.0.0\" && "
                                "$4 ~ /.*G.*/) print $2 }'")
        lines = resp.splitlines()
        self.assertEqual(1, len(lines))
        gateway = netaddr.IPAddress(lines[0])
        self.ctx.gateway = gateway
        self.assertTrue(gateway in self.subnet_cidr)

    def test_012_check_dhcp_grant_ip(self):
        """Whether dhcp provide IP address?"""
        instance = self._run_instance(self.subnet)
        state = self.waitInstanceState(instance, "running")
        if state != "running":
            raise base.TestCasePreparationError()
        self.assertTrue(instance.private_ip_address)
        instance.ip_address = self._prepare_public_ip(instance)
        self.ctx.instance3 = instance

    def test_013_check_dhcp_lease(self):
        """Whether IP address was obtained by dhcp?"""
        if self.ctx.instance3 is None:
            self.skipTest("Instance 3 was not initialized")
        ssh = remote_client.RemoteClient(self.ctx.instance3.ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        resp = ssh.exec_command("ps -eo comm,args | grep -m 1 dhclient")
        args = resp.split()
        if len(args) <= 2 or not args[0].startswith('dhclient'):
            raise base.TestCasePreparationError()
        # The lease file may be overridden on the dhclient command line
        # with "-lf PATH"; fall back to the Debian default otherwise.
        is_lf = False
        lease_file = "/var/lib/dhcp/dhclient.leases"
        for arg in args:
            if is_lf:
                lease_file = arg
                is_lf = False
            elif arg == "-lf":
                is_lf = True
        resp = ssh.exec_command("test -f %s && echo 1 || echo 0" % lease_file)
        self.assertEqual(1, int(resp))
        self.ctx.lease_file = lease_file
        resp = ssh.exec_command("grep 'fixed-address ' %s | tail -n 1 | "
                                "awk '{ print $2 }' | sed -e 's/;//'" %
                                lease_file)
        lines = resp.splitlines()
        self.assertEqual(1, len(lines))
        self.assertEqual(self.ctx.instance3.private_ip_address, lines[0])
        date = ssh.exec_command("date -u +%Y/%m/%d%H:%M:%S")
        self.assertTrue(date)
        resp = ssh.exec_command("grep 'renew ' %s | tail -n 1 | "
                                "awk '{ print $3$4 }' | sed -e 's/;//'" %
                                lease_file)
        # The next renewal must lie in the future relative to "now".
        self.assertLess(date, resp)

    def test_014_check_dhcp_sends_mtu_size(self):
        """Check DHCP sends MTU size."""
        if self.ctx.lease_file is None:
            self.skipTest("Dhcp lease file was not found")
        ssh = remote_client.RemoteClient(self.ctx.instance3.ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        resp = ssh.exec_command("grep 'option interface-mtu ' %s" %
                                self.ctx.lease_file)
        self.assertLess(0, len(resp.splitlines()))

    def test_015_check_dhcp_distribute_host_name_size(self):
        """Check DHCP distributes host hame."""
        if self.ctx.lease_file is None:
            self.skipTest("Dhcp lease file was not found")
        ssh = remote_client.RemoteClient(self.ctx.instance3.ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        resp = ssh.exec_command("grep 'option host-name ' %s" %
                                self.ctx.lease_file)
        self.assertLess(0, len(resp.splitlines()))

    def test_021_check_traffic_visibility(self):
        """Are other VMs visible?"""
        if self.ctx.instance3 is None:
            self.skipTest("Instance 3 was not initialized")
        with self.TcpDumpRunner(self.ctx.instance3,
                                self.ssh_user,
                                self.keypair,
                                "ip proto \\\\icmp") as tdump:
            ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                             self.ssh_user,
                                             pkey=self.keypair.material)
            ssh.exec_command("ping -c 1 %s" %
                             self.instance2.private_ip_address)
            if not tdump.stop():
                raise base.TestCasePreparationError()
            resp = tdump.get_result()
            # FIX: initialize "captured" before the search loop (as the
            # sibling tests 022/023 do); otherwise a log without a
            # "packets captured" line raises NameError instead of failing.
            captured = ""
            for line in resp.splitlines():
                if line.endswith("packets captured"):
                    captured = line
                    break
            tokens = captured.split()
            packets = int(tokens[0])
            self.assertEqual(0, packets)

    def test_022_check_broadcast_visible(self):
        """Is broadcast traffic visible?"""
        if self.ctx.instance3 is None:
            self.skipTest("Instance 3 was not initialized")
        with self.TcpDumpRunner(self.ctx.instance3,
                                self.ssh_user,
                                self.keypair,
                                "ip broadcast") as tdump:
            ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                             self.ssh_user,
                                             pkey=self.keypair.material)
            ssh.exec_command("echo ping |"
                             "socat - UDP4-DATAGRAM:255.255.255.255:6666,"
                             "broadcast")
            if not tdump.stop():
                raise base.TestCasePreparationError()
            resp = tdump.get_result()
            captured = ""
            for line in resp.splitlines():
                if line.endswith(" captured"):
                    captured = line
                    break
            tokens = captured.split()
            packets = int(tokens[0])
            self.assertEqual(0, packets)

    def test_023_check_multicast_visible(self):
        """Is multicast traffic visible?"""
        if self.ctx.instance3 is None:
            self.skipTest("Instance 3 was not initialized")
        with self.TcpDumpRunner(self.ctx.instance3,
                                self.ssh_user,
                                self.keypair,
                                "ip multicast") as tdump:
            ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                             self.ssh_user,
                                             pkey=self.keypair.material)
            ssh.exec_command("echo ping |"
                             "socat - UDP4-DATAGRAM:239.1.1.1:6666")
            if not tdump.stop():
                raise base.TestCasePreparationError()
            resp = tdump.get_result()
            captured = ""
            for line in resp.splitlines():
                if line.endswith(" captured"):
                    captured = line
                    break
            tokens = captured.split()
            packets = int(tokens[0])
            self.assertEqual(0, packets)

    def test_031_scan_gateway_ports(self):
        """Are gateway ports closed?"""
        if self.ctx.gateway is None:
            self.skipTest("Subnet's gateway was not found")
        ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        # nmap over 1000 ports can be slow; widen the channel timeout.
        ssh.ssh_client.channel_timeout = 600
        resp = ssh.exec_command("sudo nmap -PN %s" % str(self.ctx.gateway))
        all_closed_msg = ("All 1000 scanned ports on %s are " %
                          str(self.ctx.gateway))
        for line in resp.splitlines():
            if line.startswith(all_closed_msg):
                return
        self.fail("Some gateway ports are open")
|
188
obsolete/thirdparty/scenario/aws_compat/test_vpc_benchmark.py
vendored
Normal file
188
obsolete/thirdparty/scenario/aws_compat/test_vpc_benchmark.py
vendored
Normal file
@ -0,0 +1,188 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import select
|
||||
|
||||
from testtools import content as test_content
|
||||
|
||||
from tempest.cloudscaling import base
|
||||
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
|
||||
from tempest.common.utils.linux import remote_client
|
||||
from tempest import exceptions
|
||||
from tempest import test
|
||||
|
||||
import logging
|
||||
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
||||
|
||||
|
||||
class IPerfServer(object):
    """Wrapper to use iperf server in tests.

    Context manager: on __enter__ it starts ``iperf -s`` on the remote
    instance over a raw SSH channel and waits for the "Server listening"
    banner; on __exit__ it closes the channel (the allocated pty makes
    iperf terminate with the session).
    """
    cmd = "iperf -s"

    def __init__(self, instance, ssh_user, ssh_keypair):
        self.instance = instance
        self.ssh_user = ssh_user
        self.ssh_keypair = ssh_keypair

    def __enter__(self):
        # NOTE(ft): Iperf doesn't close stdout in server mode
        # but standard exec_command waits for it
        # so instead of use it we waits for some string in output
        ssh = remote_client.RemoteClient(self.instance.ip_address,
                                         self.ssh_user,
                                         pkey=self.ssh_keypair.material)
        ssh_conn = ssh.ssh_client._get_ssh_connection()
        chan = ssh_conn.get_transport().open_session()
        chan.get_pty()  # NOTE(ft): to stop iperf with session end
        chan.fileno()
        chan.exec_command(self.cmd)

        started = False
        out_data = []
        err_data = []
        select_params = [chan], [], [], ssh.ssh_client.channel_timeout
        while True:
            ready = select.select(*select_params)
            if not any(ready):
                # FIX: the original formatted "{1}" with a single argument
                # (IndexError) and referenced the non-existent attribute
                # self.host; report the actual instance address instead.
                raise exceptions.TimeoutException(
                    "Cannot start iperf server on host '{0}'.".format(
                        self.instance.ip_address))
            if not ready[0]:
                continue
            out_chunk = err_chunk = None
            if chan.recv_ready():
                out_chunk = chan.recv(ssh.ssh_client.buf_size)
                out_data += out_chunk,
            if chan.recv_stderr_ready():
                err_chunk = chan.recv_stderr(ssh.ssh_client.buf_size)
                err_data += err_chunk,
            if chan.exit_status_ready():
                exit_status = chan.recv_exit_status()
                if 0 != exit_status or len(err_data) > 0:
                    raise exceptions.SSHExecCommandFailed(
                        command=self.cmd, exit_status=exit_status,
                        strerror=''.join(err_data))
            lines = ''.join(out_data).splitlines()
            for line in lines:
                if line.startswith("Server listening"):
                    started = True
                    break
            # Stop waiting once the banner appeared, or once the channel
            # closed without producing any more data.
            if (started or
                    chan.closed and not err_chunk and not out_chunk):
                break
        self.ssh = ssh
        self.ssh_conn = ssh_conn
        self.chan = chan

    def __exit__(self, ex_type, ex_value, ex_traceback):
        self.chan.close()
        self.ssh_conn.close()
|
||||
|
||||
|
||||
class VPC_Benchmark(aws_base.BaseVPCTest, base.BaseBenchmarkTest):
    """Benchmark VPC network throughput."""

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        """Boot two instances in one VPC subnet and install iperf on both."""
        super(VPC_Benchmark, cls).setUpClass()
        cls.keypair = cls._prepare_key_pair()
        subnet = cls._prepare_vpc(cls.vpc_cidr, cls.subnet_cidr)

        reservation = cls.vpc_client.run_instances(
            cls.image_id,
            min_count=2, max_count=2,
            key_name=cls.keypair.name,
            instance_type=cls.instance_type,
            placement=cls.zone,
            subnet_id=subnet.id)
        if reservation is None:
            raise base.TestCasePreparationError()
        cls.addResourceCleanUp(cls.destroy_reservation, reservation)
        if len(reservation.instances) != 2:
            raise base.TestCasePreparationError()
        cls.instance1, cls.instance2 = reservation.instances
        cls._wait_instance_state(cls.instance1, "running")
        cls._wait_instance_state(cls.instance2, "running")
        cls._prepare_public_ip(cls.instance1)
        cls._prepare_public_ip(cls.instance2)

        def install_iperf(instance):
            # Best effort: bring the guest up to date, then install iperf.
            try:
                ssh = remote_client.RemoteClient(instance.ip_address,
                                                 cls.ssh_user,
                                                 pkey=cls.keypair.material)
            except exceptions.SSHTimeout:
                raise base.TestCasePreparationError()
            ssh.exec_command("sudo apt-get update && sudo apt-get upgrade -y")
            ssh.exec_command("sudo apt-get update")
            ssh.exec_command("sudo apt-get install iperf")

        install_iperf(cls.instance1)
        install_iperf(cls.instance2)

        cfg = cls.config.cloudscaling
        cls.network_performance_class = cfg.network_performance_class
        cls._load_benchmark_data("AWS_VPC_Benchmark")

    def _get_rate(self, resp):
        # iperf CSV output (-y C): throughput in bits/sec is the last
        # comma-separated field; convert to Mbits/sec.
        rate = resp.split(",")[-1]
        return int(rate) / 1000000

    def _check_test(self, rate):
        # Compare the measured rate with the recorded AWS reference for the
        # configured network performance class, when one is available.
        if not self.network_performance_class:
            return
        reference = self._get_benchmark_result(self.network_performance_class)
        if reference is not None:
            content = test_content.text_content(
                "Min rate: %sMbits/sec, Max rate: %sMBits/sec" %
                (reference[0], reference[1]))
            self.addDetail("AWS", content)
            self.assertGreaterEqual(rate, float(reference[0]),
                                    "%sMbits/sec (current) < %sMbits/sec (AWS)"
                                    % (rate, reference[0]))

    @test.attr(type='benchmark')
    def test_001_internal_vpc_tcp_150MB_throughput(self):
        """Measure internal VPC network throughput for 150 MBytes transmit."""
        if self.keypair is None:
            self.skipTest("Environment was not initialized")
        with IPerfServer(self.instance1, self.ssh_user, self.keypair):
            ssh = remote_client.RemoteClient(self.instance2.ip_address,
                                             self.ssh_user,
                                             pkey=self.keypair.material)
            resp = ssh.exec_command("iperf -c %s -n 150M -x CMSV -y C" %
                                    self.instance1.private_ip_address)
            rate = self._get_rate(resp)
            self.addDetail("Current", test_content.text_content(
                "150 MBytes throughput: %s Mbits/sec" % rate))
            self._check_test(rate)

    @test.attr(type='benchmark')
    def test_002_internal_vpc_tcp_2mins_throughput(self):
        """Measure internal VPC network throughput for 2 mins transmit."""
        if self.keypair is None:
            self.skipTest("Environment was not initialized")
        with IPerfServer(self.instance1, self.ssh_user, self.keypair):
            ssh = remote_client.RemoteClient(self.instance2.ip_address,
                                             self.ssh_user,
                                             pkey=self.keypair.material)
            # The transfer runs for 120s; give the channel some headroom.
            ssh.ssh_client.channel_timeout = 130
            resp = ssh.exec_command("iperf -c %s -t 120 -x CMSV -y C" %
                                    self.instance1.private_ip_address)
            rate = self._get_rate(resp)
            self.addDetail("Current", test_content.text_content(
                "2 mins throughput: %s Mbits/sec" % rate))
            self._check_test(rate)
|
451
obsolete/thirdparty/scenario/aws_compat/test_vpc_nat_scenario.py
vendored
Normal file
451
obsolete/thirdparty/scenario/aws_compat/test_vpc_nat_scenario.py
vendored
Normal file
@ -0,0 +1,451 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from boto.ec2 import networkinterface
|
||||
import netaddr
|
||||
|
||||
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest.common.utils.linux import remote_client
|
||||
from tempest import test
|
||||
|
||||
import logging
|
||||
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
||||
|
||||
|
||||
class VPC_NAT_Scenario(aws_base.BaseAWSTest):
|
||||
"""
|
||||
Based on 'VPC with Public and Private Subnets' scenario
|
||||
(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Scenario2.html)
|
||||
Adapted to work with OpenStack with the following differences:
|
||||
1. DNS is set up via DHCP options to 8.8.8.8 (boto has a bug, see Note).
|
||||
2. Opened DNS ports (53) in DB and NAT security groups.
|
||||
3. NAT instance is created with 2 interfaces in different subnets.
|
||||
4. SourceDestCheck is disabled for second interface of NAT instance.
|
||||
5. Default route in main route table is set to point to this interface.
|
||||
As a result, DB instance's default route goes through the second interface
|
||||
of NAT instance which is in the same subnet as the DB instance.
|
||||
To allow several private subnets to work through the same NAT, more
|
||||
secondary interfaces should be added to NAT instance for all of that
|
||||
subnets, and separate route tables should be created for each of the
|
||||
subnets.
|
||||
"""
|
||||
# NOTE(Alex) At the moment of this test's creation, boto has a bug with
|
||||
# parsing result of setting up DHCP options. Order of the Key-Values
|
||||
# returned in OpenStack for the Dictionaries is different from AWS's.
|
||||
# Potential fix should be done in boto/vpc/dhcpoptions.py:
|
||||
# DhcpConfigSet can be changed to:
|
||||
#
|
||||
# class DhcpConfigSet(dict):
|
||||
#
|
||||
# def startElement(self, name, attrs, connection):
|
||||
# if name == 'valueSet':
|
||||
# if not hasattr(self, '_value'):
|
||||
# self._value = DhcpValueSet()
|
||||
# return self._value
|
||||
#
|
||||
# def endElement(self, name, value, connection):
|
||||
# if name == 'valueSet':
|
||||
# if hasattr(self, '_name'):
|
||||
# self[self._name] = self._value
|
||||
# if name == 'key':
|
||||
# self._name = value
|
||||
# if hasattr(self, '_value'):
|
||||
# self[self._name] = self._value
|
||||
|
||||
class Context(object):
|
||||
vpc = None
|
||||
internet_gateway = None
|
||||
web_subnet = None
|
||||
db_subnet = None
|
||||
main_route_table = None
|
||||
custom_route_table = None
|
||||
web_security_group = None
|
||||
nat_security_group = None
|
||||
db_security_group = None
|
||||
web_instance = None
|
||||
db_instance = None
|
||||
nat_instance = None
|
||||
|
||||
@classmethod
|
||||
@test.safe_setup
|
||||
def setUpClass(cls):
|
||||
super(VPC_NAT_Scenario, cls).setUpClass()
|
||||
cls.ctx = cls.Context()
|
||||
cls.zone = cls.config.boto.aws_zone
|
||||
cfg = cls.config.cloudscaling
|
||||
cls.ssh_user = cfg.general_ssh_user_name
|
||||
cls.vpc_cidr = netaddr.IPNetwork(cfg.vpc_cidr)
|
||||
cls.web_subnet, cls.db_subnet = cls.vpc_cidr.subnet(
|
||||
cfg.vpc_subnet_prefix, 2)
|
||||
cls.test_client_cidr = netaddr.IPNetwork(cfg.test_client_cidr)
|
||||
cls.image_id = cls._prepare_image_id(cfg.general_image_name)
|
||||
cls.keypair = cls._prepare_key_pair()
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
if cls.ctx is not None:
|
||||
for group in [cls.ctx.web_security_group,
|
||||
cls.ctx.nat_security_group,
|
||||
cls.ctx.db_security_group]:
|
||||
if not group:
|
||||
continue
|
||||
try:
|
||||
cls._revoke_security_group_linked_rules(group)
|
||||
except Exception:
|
||||
pass
|
||||
super(VPC_NAT_Scenario, cls).tearDownClass()
|
||||
|
||||
@classmethod
def _revoke_security_group_linked_rules(cls, group):
    """Revoke every rule of *group* that grants access to another group.

    Only rules whose grant references a security group (i.e. no
    ``cidr_ip``) are revoked — those are the ones that block group
    deletion.  Plain CIDR rules are left in place.

    :param group: security group (boto object) to strip
    """
    groups = cls.vpc_client.get_all_security_groups(group_ids=[group.id])
    if len(groups) == 0:
        # The group is already gone; nothing to revoke.
        return
    # Re-fetch gives us the current rule set, not the possibly stale
    # object the caller holds.
    sg = groups[0]
    for rule in sg.rules:
        for grant in rule.grants:
            if not grant.cidr_ip:
                cls.vpc_client.revoke_security_group(
                    group_id=sg.id,
                    ip_protocol=rule.ip_protocol,
                    from_port=rule.from_port,
                    to_port=rule.to_port,
                    src_security_group_group_id=grant.groupId)
    for rule in sg.rules_egress:
        for grant in rule.grants:
            if not grant.cidr_ip:
                # NOTE: the egress call takes positional id/protocol and
                # a differently named source-group keyword than ingress.
                cls.vpc_client.revoke_security_group_egress(
                    sg.id,
                    rule.ip_protocol,
                    from_port=rule.from_port,
                    to_port=rule.to_port,
                    src_group_id=grant.groupId)
|
||||
|
||||
def test_000_create_vpc(self):
    """Create VPC with DHCP options pointing at a public DNS server."""
    # Google DNS so instances inside the VPC can resolve external names
    # during the later connectivity checks.
    dhcp_opts = self.vpc_client.create_dhcp_options(
        domain_name_servers=['8.8.8.8'])
    self.assertIsNotNone(dhcp_opts)
    self.assertTrue(dhcp_opts.id)
    self.addResourceCleanUp(self.vpc_client.delete_dhcp_options,
                            dhcp_opts.id)
    vpc = self.vpc_client.create_vpc(str(self.vpc_cidr))
    self.assertIsNotNone(vpc)
    self.assertTrue(vpc.id)
    self.addResourceCleanUp(self.vpc_client.delete_vpc, vpc.id)
    self.assertTrue(self.vpc_client.associate_dhcp_options(dhcp_opts.id,
                                                           vpc.id))
    self.ctx.vpc = vpc
|
||||
|
||||
def test_001_create_internet_gateway(self):
    """Create an internet gateway and attach it to the scenario VPC."""
    gateway = self.vpc_client.create_internet_gateway()
    self.assertIsNotNone(gateway)
    self.assertTrue(gateway.id)
    self.addResourceCleanUp(self._destroy_internet_gateway, gateway)
    attached = self.vpc_client.attach_internet_gateway(
        gateway.id, self.ctx.vpc.id)
    self.assertTrue(attached)
    self.ctx.internet_gateway = gateway
|
||||
|
||||
def test_010_create_subnets(self):
    """Create subnets"""
    # Public (web) subnet, placed in the scenario's availability zone.
    sn = self.vpc_client.create_subnet(self.ctx.vpc.id,
                                       str(self.web_subnet),
                                       self.zone)
    self.assertIsNotNone(sn)
    self.assertTrue(sn.id)
    self.addResourceCleanUp(self.vpc_client.delete_subnet, sn.id)
    self.ctx.web_subnet = sn
    # Private (db) subnet, same zone.
    sn = self.vpc_client.create_subnet(self.ctx.vpc.id,
                                       str(self.db_subnet),
                                       self.zone)
    self.assertIsNotNone(sn)
    self.assertTrue(sn.id)
    self.addResourceCleanUp(self.vpc_client.delete_subnet, sn.id)
    self.ctx.db_subnet = sn
|
||||
|
||||
def test_020_get_main_route_table(self):
    """Look up the route table that was auto-created with the VPC."""
    tables = self.vpc_client.get_all_route_tables(
        filters=[("vpc-id", self.ctx.vpc.id)])
    self.assertIsNotNone(tables)
    self.assertEqual(1, len(tables))
    self.ctx.main_route_table = tables[0]
|
||||
|
||||
def test_025_create_custom_route_table(self):
    """Create route table for web servers"""
    rtable = self.vpc_client.create_route_table(self.ctx.vpc.id)
    self.assertIsNotNone(rtable)
    self.assertTrue(rtable.id)
    self.addResourceCleanUp(self.vpc_client.delete_route_table, rtable.id)
    ig = self.ctx.internet_gateway
    # A default route straight to the internet gateway is what makes
    # the web subnet a "public" subnet.
    status = self.vpc_client.create_route(rtable.id, "0.0.0.0/0",
                                          gateway_id=ig.id)
    self.assertTrue(status)
    association_id = self.vpc_client.associate_route_table(
        rtable.id, self.ctx.web_subnet.id)
    self.assertTrue(association_id)
    self.addResourceCleanUp(self.vpc_client.disassociate_route_table,
                            association_id)
    self.ctx.custom_route_table = rtable
|
||||
|
||||
def _create_vpc_security_group(self, name_prefix):
    """Create a security group in the scenario VPC and register cleanup.

    Factored out of test_050: the same create/assert/cleanup sequence
    was written inline three times.

    :param name_prefix: prefix for the randomized group name
    :returns: the created security group
    """
    sg = self.vpc_client.create_security_group(
        data_utils.rand_name(name_prefix),
        data_utils.rand_name("description "),
        self.ctx.vpc.id)
    self.assertIsNotNone(sg)
    self.assertTrue(sg.id)
    self.addResourceCleanUp(self._destroy_security_group_wait, sg)
    return sg

def test_050_create_security_groups(self):
    """Create and tune security groups"""
    # Creation order is unchanged: web, NAT, db.
    self.ctx.web_security_group = self._create_vpc_security_group(
        "WebServerSG-")
    self.ctx.nat_security_group = self._create_vpc_security_group(
        "NATSG-")
    self.ctx.db_security_group = self._create_vpc_security_group(
        "DBServerSG-")

    # Web servers: drop the default allow-all egress, then allow only
    # DB ports towards the db group plus what the tests themselves need.
    sg = self.ctx.web_security_group
    status = self.vpc_client.revoke_security_group_egress(
        sg.id, "-1", cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 1433, 1433,
        src_group_id=self.ctx.db_security_group.id)
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 3306, 3306,
        src_group_id=self.ctx.db_security_group.id)
    self.assertTrue(status)
    # NOTE(ft): especially for connectivity test
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    # NOTE(ft): especially for connectivity test
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 22, 22,
        src_group_id=self.ctx.db_security_group.id)
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=80, to_port=80,
        cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=443, to_port=443,
        cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=22, to_port=22,
        cidr_ip=str(self.test_client_cidr))
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=3389,
        to_port=3389, cidr_ip=str(self.test_client_cidr))
    self.assertTrue(status)

    # NAT box: outbound web + DNS, inbound DNS from anywhere, web ports
    # from the private db subnet, and SSH from the test client.
    sg = self.ctx.nat_security_group
    status = self.vpc_client.revoke_security_group_egress(
        sg.id, "-1", cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 443, 443, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 53, 53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "udp", 53, 53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=53,
        to_port=53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="udp", from_port=53,
        to_port=53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=80, to_port=80,
        cidr_ip=str(self.db_subnet))
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=443, to_port=443,
        cidr_ip=str(self.db_subnet))
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=22, to_port=22,
        cidr_ip=str(self.test_client_cidr))
    self.assertTrue(status)

    # DB servers: outbound web + DNS (via NAT), inbound DB ports and
    # SSH only from the web group.
    sg = self.ctx.db_security_group
    status = self.vpc_client.revoke_security_group_egress(
        sg.id, "-1", cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 443, 443, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 53, 53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "udp", 53, 53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp",
        from_port=1433,
        to_port=1433,
        src_security_group_group_id=self.ctx.web_security_group.id)
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp",
        from_port=3306,
        to_port=3306,
        src_security_group_group_id=self.ctx.web_security_group.id)
    self.assertTrue(status)
    # NOTE(ft): especially for connectivity test
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp",
        from_port=22,
        to_port=22,
        src_security_group_group_id=self.ctx.web_security_group.id)
    self.assertTrue(status)
|
||||
|
||||
def test_100_launch_nat_instance(self):
    """Launch instances for NAT server"""
    # The NAT instance gets two interfaces: the first in the public web
    # subnet, the second in the private db subnet, both in the NAT
    # security group.
    interface_web = networkinterface.NetworkInterfaceSpecification(
        subnet_id=self.ctx.web_subnet.id,
        groups=[self.ctx.nat_security_group.id])
    interface_db = networkinterface.NetworkInterfaceSpecification(
        subnet_id=self.ctx.db_subnet.id,
        groups=[self.ctx.nat_security_group.id])
    reservation = self.vpc_client.run_instances(
        self.image_id,
        key_name=self.keypair.name,
        # security_group_ids=[self.ctx.nat_security_group.id],
        instance_type=self.instance_type,
        placement=self.zone,
        # subnet_id=self.ctx.web_subnet.id
        network_interfaces=(
            networkinterface.NetworkInterfaceCollection(
                interface_web, interface_db))
    )
    self.assertIsNotNone(reservation)
    self.addResourceCleanUp(self.destroy_reservation, reservation)
    self.assertEqual(1, len(reservation.instances))
    instance = reservation.instances[0]
    if instance.state != "running":
        self.assertInstanceStateWait(instance, "running")
    # Public IP goes on the external (web subnet) interface.
    instance.ip_address = self._prepare_public_ip(
        instance,
        instance.interfaces[0].id)
    internal_interface_id = instance.interfaces[1].id
    # A NAT box forwards traffic not addressed to itself, so the
    # source/dest check must be disabled on the internal interface.
    status = self.vpc_client.modify_network_interface_attribute(
        internal_interface_id,
        attr='sourceDestCheck',
        value=False)
    self.assertTrue(status)

    # Route the private subnet's (main route table's) default traffic
    # through the NAT instance's internal interface.
    rtable = self.ctx.main_route_table
    status = self.vpc_client.create_route(
        rtable.id, "0.0.0.0/0",
        interface_id=internal_interface_id)
    self.assertTrue(status)
    self.ctx.nat_instance = instance
|
||||
|
||||
def test_101_launch_instances(self):
    """Launch instances for web server and db server"""
    # Web server in the public subnet; it gets a public IP so the test
    # client can SSH in.
    reservation = self.vpc_client.run_instances(
        self.image_id,
        key_name=self.keypair.name,
        security_group_ids=[self.ctx.web_security_group.id],
        instance_type=self.instance_type,
        placement=self.zone,
        subnet_id=self.ctx.web_subnet.id)
    self.assertIsNotNone(reservation)
    self.addResourceCleanUp(self.destroy_reservation, reservation)
    self.assertEqual(1, len(reservation.instances))
    instance = reservation.instances[0]
    if instance.state != "running":
        self.assertInstanceStateWait(instance, "running")
    instance.ip_address = self._prepare_public_ip(instance)
    self.ctx.web_instance = instance

    # DB server in the private subnet; no public IP — it is reachable
    # only from the web instance and goes out through the NAT box.
    reservation = self.vpc_client.run_instances(
        self.image_id,
        key_name=self.keypair.name,
        security_group_ids=[self.ctx.db_security_group.id],
        instance_type=self.instance_type,
        placement=self.zone,
        subnet_id=self.ctx.db_subnet.id)
    self.assertIsNotNone(reservation)
    self.addResourceCleanUp(self.destroy_reservation, reservation)
    self.assertEqual(1, len(reservation.instances))
    instance = reservation.instances[0]
    if instance.state != "running":
        self.assertInstanceStateWait(instance, "running")
    self.ctx.db_instance = instance
|
||||
|
||||
def test_102_tune_nat_instance(self):
    """Tune NAT in NAT instance"""
    instance = self.ctx.nat_instance
    address = instance.ip_address
    ssh = remote_client.RemoteClient(address,
                                     self.ssh_user,
                                     pkey=self.keypair.material)

    # Masquerade all VPC traffic leaving through eth0 and enable kernel
    # IP forwarding so the box actually routes packets.
    ssh.exec_command("sudo iptables -t nat -A POSTROUTING -s %s "
                     "-o eth0 -j MASQUERADE" % str(self.vpc_cidr))
    ssh.exec_command("sudo sysctl -w net.ipv4.ip_forward=1")
    # Bring up the second (internal) interface; the image presumably
    # configures only eth0 by default — TODO confirm for other images.
    ssh.exec_command("echo $'auto eth1\niface eth1 inet dhcp\n' "
                     "| sudo tee -a /etc/network/interfaces.d/eth1.cfg")
    ssh.exec_command("sudo ifup eth1")
|
||||
|
||||
def test_200_check_connectivity(self):
    """Check inside and outside connectivities"""
    web_ip = self.ctx.web_instance.ip_address
    db_ip = self.ctx.db_instance.private_ip_address
    ssh = remote_client.RemoteClient(web_ip,
                                     self.ssh_user,
                                     pkey=self.keypair.material)
    # Copy the scenario's private key onto the web instance so it can
    # hop from there to the db instance in the private subnet.
    ssh_conn = ssh.ssh_client._get_ssh_connection()
    sftp = ssh_conn.open_sftp()
    fr = sftp.file("key.pem", 'wb')
    fr.set_pipelined(True)
    fr.write(self.keypair.material)
    fr.close()
    ssh_conn.close()
    ssh.exec_command('chmod 400 key.pem')
    # From the web instance, SSH into the db instance and curl an
    # external site: success proves the private subnet's egress path
    # through the NAT instance works end to end.
    ssh.exec_command(
        "ssh -i key.pem -o UserKnownHostsFile=/dev/null "
        "-o StrictHostKeyChecking=no %(user)s@%(ip)s "
        "curl -s http://google.com" %
        {"user": self.ssh_user, "ip": db_ip})
|
379
obsolete/thirdparty/scenario/aws_compat/test_vpc_scenario.py
vendored
Normal file
379
obsolete/thirdparty/scenario/aws_compat/test_vpc_scenario.py
vendored
Normal file
@ -0,0 +1,379 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import netaddr
|
||||
|
||||
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest.common.utils.linux import remote_client
|
||||
from tempest import test
|
||||
|
||||
import logging
|
||||
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
||||
|
||||
|
||||
class VPC_Scenario(aws_base.BaseAWSTest):
    """
    Reproduce 'VPC with Public and Private Subnets' scenario
    (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Scenario2.html)

    The numbered test methods run in order and build on each other's
    results, which are shared through the class-level Context instance.
    """
    class Context(object):
        # Resources created by the ordered steps; shared between steps
        # and class-level teardown.  All start as None.
        vpc = None
        internet_gateway = None
        web_subnet = None
        db_subnet = None
        main_route_table = None
        custom_route_table = None
        web_security_group = None
        nat_security_group = None
        db_security_group = None
        web_instance = None
        db_instance = None
        nat_instance = None

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        """Resolve configuration and shared fixtures for the scenario."""
        super(VPC_Scenario, cls).setUpClass()
        cls.ctx = cls.Context()
        cls.zone = cls.config.boto.aws_zone
        cfg = cls.config.cloudscaling
        cls.ssh_user = cfg.general_ssh_user_name
        cls.vpc_cidr = netaddr.IPNetwork(cfg.vpc_cidr)
        # Two subnets of the configured prefix length: web then db.
        cls.web_subnet, cls.db_subnet = cls.vpc_cidr.subnet(
            cfg.vpc_subnet_prefix, 2)
        cls.test_client_cidr = netaddr.IPNetwork(cfg.test_client_cidr)
        cls.image_id = cls._prepare_image_id(cfg.general_image_name)
        cls.keypair = cls._prepare_key_pair()

    @classmethod
    def tearDownClass(cls):
        """Break security-group cross-references, then run base cleanup.

        Groups that reference each other cannot be deleted until the
        linked rules are revoked.
        """
        if cls.ctx is not None:
            for group in [cls.ctx.web_security_group,
                          cls.ctx.nat_security_group,
                          cls.ctx.db_security_group]:
                if not group:
                    # The creating step never ran or failed.
                    continue
                try:
                    cls._revoke_security_group_linked_rules(group)
                except Exception:
                    # Best-effort: do not mask the test result.
                    pass
        super(VPC_Scenario, cls).tearDownClass()

    @classmethod
    def _revoke_security_group_linked_rules(cls, group):
        """Revoke every rule of *group* granting access to another group.

        Only group-to-group grants (no cidr_ip) block deletion, so only
        those are revoked.
        """
        groups = cls.vpc_client.get_all_security_groups(group_ids=[group.id])
        if len(groups) == 0:
            # Group already deleted; nothing to do.
            return
        sg = groups[0]
        for rule in sg.rules:
            for grant in rule.grants:
                if not grant.cidr_ip:
                    cls.vpc_client.revoke_security_group(
                        group_id=sg.id,
                        ip_protocol=rule.ip_protocol,
                        from_port=rule.from_port,
                        to_port=rule.to_port,
                        src_security_group_group_id=grant.groupId)
        for rule in sg.rules_egress:
            for grant in rule.grants:
                if not grant.cidr_ip:
                    cls.vpc_client.revoke_security_group_egress(
                        sg.id,
                        rule.ip_protocol,
                        from_port=rule.from_port,
                        to_port=rule.to_port,
                        src_group_id=grant.groupId)

    def test_000_create_vpc(self):
        """Create VPC"""
        vpc = self.vpc_client.create_vpc(str(self.vpc_cidr))
        self.assertIsNotNone(vpc)
        self.assertTrue(vpc.id)
        self.addResourceCleanUp(self.vpc_client.delete_vpc, vpc.id)
        self.ctx.vpc = vpc

    def test_001_create_internet_gateway(self):
        """Create internet gateway"""
        ig = self.vpc_client.create_internet_gateway()
        self.assertIsNotNone(ig)
        self.assertTrue(ig.id)
        self.addResourceCleanUp(self._destroy_internet_gateway, ig)
        status = self.vpc_client.attach_internet_gateway(ig.id,
                                                         self.ctx.vpc.id)
        self.assertTrue(status)
        self.ctx.internet_gateway = ig

    def test_010_create_subnets(self):
        """Create subnets"""
        # Public (web) subnet.
        sn = self.vpc_client.create_subnet(self.ctx.vpc.id,
                                           str(self.web_subnet),
                                           self.zone)
        self.assertIsNotNone(sn)
        self.assertTrue(sn.id)
        self.addResourceCleanUp(self.vpc_client.delete_subnet, sn.id)
        self.ctx.web_subnet = sn
        # Private (db) subnet.
        sn = self.vpc_client.create_subnet(self.ctx.vpc.id,
                                           str(self.db_subnet),
                                           self.zone)
        self.assertIsNotNone(sn)
        self.assertTrue(sn.id)
        self.addResourceCleanUp(self.vpc_client.delete_subnet, sn.id)
        self.ctx.db_subnet = sn

    def test_020_get_main_route_table(self):
        """Describe auto created route table"""
        rtables = self.vpc_client.get_all_route_tables(
            filters=[("vpc-id", self.ctx.vpc.id)])
        self.assertIsNotNone(rtables)
        self.assertEqual(1, len(rtables))
        self.ctx.main_route_table = rtables[0]

    def test_025_create_custom_route_table(self):
        """Create route table for web servers"""
        rtable = self.vpc_client.create_route_table(self.ctx.vpc.id)
        self.assertIsNotNone(rtable)
        self.assertTrue(rtable.id)
        self.addResourceCleanUp(self.vpc_client.delete_route_table, rtable.id)
        ig = self.ctx.internet_gateway
        # Default route to the internet gateway makes the web subnet
        # public.
        status = self.vpc_client.create_route(rtable.id, "0.0.0.0/0",
                                              gateway_id=ig.id)
        self.assertTrue(status)
        association_id = self.vpc_client.associate_route_table(
            rtable.id, self.ctx.web_subnet.id)
        self.assertTrue(association_id)
        self.addResourceCleanUp(self.vpc_client.disassociate_route_table,
                                association_id)
        self.ctx.custom_route_table = rtable

    def test_050_create_security_groups(self):
        """Create and tune security groups"""
        sg = self.vpc_client.create_security_group(
            data_utils.rand_name("WebServerSG-"),
            data_utils.rand_name("description "),
            self.ctx.vpc.id)
        self.assertIsNotNone(sg)
        self.assertTrue(sg.id)
        self.addResourceCleanUp(self._destroy_security_group_wait, sg)
        self.ctx.web_security_group = sg
        sg = self.vpc_client.create_security_group(
            data_utils.rand_name("NATSG-"),
            data_utils.rand_name("description "),
            self.ctx.vpc.id)
        self.assertIsNotNone(sg)
        self.assertTrue(sg.id)
        self.addResourceCleanUp(self._destroy_security_group_wait, sg)
        self.ctx.nat_security_group = sg
        sg = self.vpc_client.create_security_group(
            data_utils.rand_name("DBServerSG-"),
            data_utils.rand_name("description "),
            self.ctx.vpc.id)
        self.assertIsNotNone(sg)
        self.assertTrue(sg.id)
        self.addResourceCleanUp(self._destroy_security_group_wait, sg)
        self.ctx.db_security_group = sg

        # Web servers: replace default allow-all egress with DB ports
        # towards the db group plus ports the tests themselves need.
        sg = self.ctx.web_security_group
        status = self.vpc_client.revoke_security_group_egress(
            sg.id, "-1", cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 1433, 1433,
            src_group_id=self.ctx.db_security_group.id)
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 3306, 3306,
            src_group_id=self.ctx.db_security_group.id)
        self.assertTrue(status)
        # NOTE(ft): especially for connectivity test
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        # NOTE(ft): especially for connectivity test
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 22, 22,
            src_group_id=self.ctx.db_security_group.id)
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=80, to_port=80,
            cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=443, to_port=443,
            cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=22, to_port=22,
            cidr_ip=str(self.test_client_cidr))
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=3389,
            to_port=3389, cidr_ip=str(self.test_client_cidr))
        self.assertTrue(status)

        # NAT box: outbound web only; inbound web ports from the db
        # subnet and SSH from the test client.
        sg = self.ctx.nat_security_group
        status = self.vpc_client.revoke_security_group_egress(
            sg.id, "-1", cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 443, 443, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=80, to_port=80,
            cidr_ip=str(self.db_subnet))
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=443, to_port=443,
            cidr_ip=str(self.db_subnet))
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=22, to_port=22,
            cidr_ip=str(self.test_client_cidr))
        self.assertTrue(status)

        # DB servers: outbound web (via NAT); inbound DB ports and SSH
        # only from the web group.
        sg = self.ctx.db_security_group
        status = self.vpc_client.revoke_security_group_egress(
            sg.id, "-1", cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 443, 443, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp",
            from_port=1433,
            to_port=1433,
            src_security_group_group_id=self.ctx.web_security_group.id)
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp",
            from_port=3306,
            to_port=3306,
            src_security_group_group_id=self.ctx.web_security_group.id)
        self.assertTrue(status)
        # NOTE(ft): especially for connectivity test
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp",
            from_port=22,
            to_port=22,
            src_security_group_group_id=self.ctx.web_security_group.id)
        self.assertTrue(status)

    def test_100_launch_nat_instance(self):
        """Launch instances for NAT server"""
        # Single-interface NAT in the public subnet (contrast with the
        # two-interface NAT variant of the NAT scenario test).
        reservation = self.vpc_client.run_instances(
            self.image_id,
            key_name=self.keypair.name,
            security_group_ids=[self.ctx.nat_security_group.id],
            instance_type=self.instance_type,
            placement=self.zone,
            subnet_id=self.ctx.web_subnet.id)
        self.assertIsNotNone(reservation)
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        self.assertEqual(1, len(reservation.instances))
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        self._prepare_public_ip(instance)
        # NAT forwards traffic not addressed to itself, so the
        # source/dest check must be disabled on the instance.
        status = self.vpc_client.modify_instance_attribute(
            instance.id, 'sourceDestCheck', False)
        self.assertTrue(status)

        # Default-route the private (main) route table through the NAT
        # instance.
        rtable = self.ctx.main_route_table
        status = self.vpc_client.create_route(rtable.id, "0.0.0.0/0",
                                              instance_id=instance.id)
        self.assertTrue(status)
        self.ctx.nat_instance = instance

    def test_101_launch_instances(self):
        """Launch instances for web server and db server"""
        # Web server in the public subnet with a public IP.
        reservation = self.vpc_client.run_instances(
            self.image_id,
            key_name=self.keypair.name,
            security_group_ids=[self.ctx.web_security_group.id],
            instance_type=self.instance_type,
            placement=self.zone,
            subnet_id=self.ctx.web_subnet.id)
        self.assertIsNotNone(reservation)
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        self.assertEqual(1, len(reservation.instances))
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        self._prepare_public_ip(instance)
        self.ctx.web_instance = instance

        # DB server in the private subnet; no public IP.
        reservation = self.vpc_client.run_instances(
            self.image_id,
            key_name=self.keypair.name,
            security_group_ids=[self.ctx.db_security_group.id],
            instance_type=self.instance_type,
            placement=self.zone,
            subnet_id=self.ctx.db_subnet.id)
        self.assertIsNotNone(reservation)
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        self.assertEqual(1, len(reservation.instances))
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        self.ctx.db_instance = instance

    def test_102_tune_nat_instance(self):
        """Tune NAT in NAT instance"""
        instance = self.ctx.nat_instance
        address = instance.ip_address
        ssh = remote_client.RemoteClient(address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)

        # NOTE(ft): We must use tty mode, because some images (like Amazon
        # Linux) has restrictions (requiretty flag in /etc/sudoers)
        ssh_conn = ssh.ssh_client._get_ssh_connection()
        chan = ssh_conn.get_transport().open_session()
        chan.get_pty()
        chan.exec_command("sudo iptables -t nat -A POSTROUTING -s %s "
                          "-o eth0 -j MASQUERADE" % str(self.vpc_cidr))
        chan.close()
        chan = ssh_conn.get_transport().open_session()
        chan.get_pty()
        chan.exec_command("sudo sysctl -w net.ipv4.ip_forward=1")
        chan.close()
        ssh_conn.close()

    def test_200_check_connectivity(self):
        """Check inside and outside connectivities"""
        web_ip = self.ctx.web_instance.ip_address
        db_ip = self.ctx.db_instance.private_ip_address
        ssh = remote_client.RemoteClient(web_ip,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        # Outside connectivity from the public subnet.
        ssh.exec_command("curl -s http://google.com")

        # Copy the keypair to the web instance so it can hop to the db
        # instance in the private subnet.
        ssh_conn = ssh.ssh_client._get_ssh_connection()
        sftp = ssh_conn.open_sftp()
        fr = sftp.file("key.pem", 'wb')
        fr.set_pipelined(True)
        fr.write(self.keypair.material)
        fr.close()
        ssh_conn.close()
        ssh.exec_command('chmod 400 key.pem')
        # From the web instance, SSH to the db instance and curl out
        # through the NAT box: proves private-subnet egress end to end.
        ssh.exec_command("ssh -i key.pem -o UserKnownHostsFile=/dev/null "
                         "-o StrictHostKeyChecking=no %(user)s@%(ip)s "
                         "curl -s http://google.com" %
                         {"user": self.ssh_user, "ip": db_ip})
|
35
obsolete/utils.py
Normal file
35
obsolete/utils.py
Normal file
@ -0,0 +1,35 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
logging.getLogger('boto').setLevel(logging.INFO)
|
||||
logging.getLogger('paramiko').setLevel(logging.WARNING)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def detect_new_volume(proc_partitions, proc_partitions_new):
    """Return the name of a device present only in the newer snapshot.

    Compares two snapshots of /proc/partitions lines and returns one
    device name that appears in the new snapshot but not the old one.
    Raises KeyError if no new device is found.

    :param proc_partitions: lines of /proc/partitions before the change
    :param proc_partitions_new: lines of /proc/partitions after the change
    :returns: a device name from the set difference (arbitrary pick)
    """
    before = get_devices(proc_partitions)
    after = get_devices(proc_partitions_new)
    return (after - before).pop()
|
||||
|
||||
|
||||
def get_devices(proc_partitions):
    """Extract the device-name column from /proc/partitions lines.

    Each data line of /proc/partitions holds four whitespace-separated
    fields: major, minor, #blocks, name.  Blank lines and any line with
    fewer than four fields are ignored.

    :param proc_partitions: iterable of lines from /proc/partitions
    :returns: set of device names (the 4th column of each valid line)
    """
    devices = set()
    for line in proc_partitions:
        # split() with no argument collapses runs of spaces AND tabs,
        # unlike the previous split(' ') + filter.
        fields = line.split()
        # Require all four columns: the old guard (len > 0) raised
        # IndexError on any line with 1-3 fields.
        if len(fields) >= 4:
            devices.add(fields[3])

    return devices
|
Loading…
Reference in New Issue
Block a user