initial cleanup

some common code is not applicable for this tempest plugin

Change-Id: Icc6c8a8fd76cffcc01c582e6623f798fcdd658f6
This commit is contained in:
Andrey Pavlov 2018-01-12 11:04:57 +03:00
parent 982e78ff16
commit 15cd422773
47 changed files with 30 additions and 3158 deletions

View File

@ -1,6 +0,0 @@
[run]
branch = True
source = ec2api_tempest_plugin
[report]
ignore_errors = True

View File

@ -1,3 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>

View File

@ -1,3 +0,0 @@
[DEFAULT]
test_path=./ec2api_tempest_plugin/tests
top_dir=./

View File

View File

@ -1,5 +0,0 @@
====================
Administrators guide
====================
Administrators guide of openstack.

View File

@ -1,5 +0,0 @@
================================
Command line interface reference
================================
CLI reference of openstack.

View File

@ -1,81 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'openstackdocstheme',
#'sphinx.ext.intersphinx',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'openstack'
copyright = u'2017, OpenStack Developers'
# openstackdocstheme options
repository_name = 'openstack/openstack'
bug_project = 'ec2api_tempest_plugin'
bug_tag = ''
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme = 'openstackdocs'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Developers', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}

View File

@ -1,5 +0,0 @@
=============
Configuration
=============
Configuration of openstack.

View File

@ -1,4 +0,0 @@
============
Contributing
============
.. include:: ../../../CONTRIBUTING.rst

View File

@ -1,9 +0,0 @@
===========================
Contributor Documentation
===========================
.. toctree::
:maxdepth: 2
contributing

View File

@ -1,30 +0,0 @@
.. openstack documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
=====================================================
Welcome to the documentation of ec2api_tempest_plugin
=====================================================
Contents:
.. toctree::
:maxdepth: 2
readme
install/index
library/index
contributor/index
configuration/index
cli/index
user/index
admin/index
reference/index
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@ -1,10 +0,0 @@
2. Edit the ``/etc/ec2api_tempest_plugin/ec2api_tempest_plugin.conf`` file and complete the following
actions:
* In the ``[database]`` section, configure database access:
.. code-block:: ini
[database]
...
connection = mysql+pymysql://ec2api_tempest_plugin:EC2API_TEMPEST_PLUGIN_DBPASS@controller/ec2api_tempest_plugin

View File

@ -1,75 +0,0 @@
Prerequisites
-------------
Before you install and configure the openstack service,
you must create a database, service credentials, and API endpoints.
#. To create the database, complete these steps:
* Use the database access client to connect to the database
server as the ``root`` user:
.. code-block:: console
$ mysql -u root -p
* Create the ``ec2api_tempest_plugin`` database:
.. code-block:: none
CREATE DATABASE ec2api_tempest_plugin;
* Grant proper access to the ``ec2api_tempest_plugin`` database:
.. code-block:: none
GRANT ALL PRIVILEGES ON ec2api_tempest_plugin.* TO 'ec2api_tempest_plugin'@'localhost' \
IDENTIFIED BY 'EC2API_TEMPEST_PLUGIN_DBPASS';
GRANT ALL PRIVILEGES ON ec2api_tempest_plugin.* TO 'ec2api_tempest_plugin'@'%' \
IDENTIFIED BY 'EC2API_TEMPEST_PLUGIN_DBPASS';
Replace ``EC2API_TEMPEST_PLUGIN_DBPASS`` with a suitable password.
* Exit the database access client.
.. code-block:: none
exit;
#. Source the ``admin`` credentials to gain access to
admin-only CLI commands:
.. code-block:: console
$ . admin-openrc
#. To create the service credentials, complete these steps:
* Create the ``ec2api_tempest_plugin`` user:
.. code-block:: console
$ openstack user create --domain default --password-prompt ec2api_tempest_plugin
* Add the ``admin`` role to the ``ec2api_tempest_plugin`` user:
.. code-block:: console
$ openstack role add --project service --user ec2api_tempest_plugin admin
* Create the ec2api_tempest_plugin service entities:
.. code-block:: console
$ openstack service create --name ec2api_tempest_plugin --description "openstack" openstack
#. Create the openstack service API endpoints:
.. code-block:: console
$ openstack endpoint create --region RegionOne \
openstack public http://controller:XXXX/vY/%\(tenant_id\)s
$ openstack endpoint create --region RegionOne \
openstack internal http://controller:XXXX/vY/%\(tenant_id\)s
$ openstack endpoint create --region RegionOne \
openstack admin http://controller:XXXX/vY/%\(tenant_id\)s

View File

@ -1,9 +0,0 @@
==========================
openstack service overview
==========================
The openstack service provides...
The openstack service consists of the following components:
``ec2api_tempest_plugin-api`` service
Accepts and responds to end user compute API calls...

View File

@ -1,17 +0,0 @@
====================================
openstack service installation guide
====================================
.. toctree::
:maxdepth: 2
get_started.rst
install.rst
verify.rst
next-steps.rst
The openstack service (ec2api_tempest_plugin) provides...
This chapter assumes a working setup of OpenStack following the
`OpenStack Installation Tutorial
<https://docs.openstack.org/project-install-guide/ocata/>`_.

View File

@ -1,34 +0,0 @@
.. _install-obs:
Install and configure for openSUSE and SUSE Linux Enterprise
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the openstack service
for openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1.
.. include:: common_prerequisites.rst
Install and configure components
--------------------------------
#. Install the packages:
.. code-block:: console
# zypper --quiet --non-interactive install
.. include:: common_configure.rst
Finalize installation
---------------------
Start the openstack services and configure them to start when
the system boots:
.. code-block:: console
# systemctl enable openstack-ec2api_tempest_plugin-api.service
# systemctl start openstack-ec2api_tempest_plugin-api.service

View File

@ -1,33 +0,0 @@
.. _install-rdo:
Install and configure for Red Hat Enterprise Linux and CentOS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the openstack service
for Red Hat Enterprise Linux 7 and CentOS 7.
.. include:: common_prerequisites.rst
Install and configure components
--------------------------------
#. Install the packages:
.. code-block:: console
# yum install
.. include:: common_configure.rst
Finalize installation
---------------------
Start the openstack services and configure them to start when
the system boots:
.. code-block:: console
# systemctl enable openstack-ec2api_tempest_plugin-api.service
# systemctl start openstack-ec2api_tempest_plugin-api.service

View File

@ -1,31 +0,0 @@
.. _install-ubuntu:
Install and configure for Ubuntu
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the openstack
service for Ubuntu 14.04 (LTS).
.. include:: common_prerequisites.rst
Install and configure components
--------------------------------
#. Install the packages:
.. code-block:: console
# apt-get update
# apt-get install
.. include:: common_configure.rst
Finalize installation
---------------------
Restart the openstack services:
.. code-block:: console
# service openstack-ec2api_tempest_plugin-api restart

View File

@ -1,20 +0,0 @@
.. _install:
Install and configure
~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the
openstack service, code-named ec2api_tempest_plugin, on the controller node.
This section assumes that you already have a working OpenStack
environment with at least the following components installed:
.. (add the appropriate services here and further notes)
Note that installation and configuration vary by distribution.
.. toctree::
:maxdepth: 2
install-obs.rst
install-rdo.rst
install-ubuntu.rst

View File

@ -1,9 +0,0 @@
.. _next-steps:
Next steps
~~~~~~~~~~
Your OpenStack environment now includes the ec2api_tempest_plugin service.
To add additional services, see
https://docs.openstack.org/project-install-guide/ocata/.

View File

@ -1,24 +0,0 @@
.. _verify:
Verify operation
~~~~~~~~~~~~~~~~
Verify operation of the openstack service.
.. note::
Perform these commands on the controller node.
#. Source the ``admin`` project credentials to gain access to
admin-only CLI commands:
.. code-block:: console
$ . admin-openrc
#. List service components to verify successful launch and registration
of each process:
.. code-block:: console
$ openstack openstack service list

View File

@ -1,7 +0,0 @@
========
Usage
========
To use openstack in a project::
import ec2api_tempest_plugin

View File

@ -1 +0,0 @@
.. include:: ../../README.rst

View File

@ -1,5 +0,0 @@
==========
References
==========
References of openstack.

View File

@ -1,5 +0,0 @@
===========
Users guide
===========
Users guide of openstack.

View File

@ -813,10 +813,7 @@ class EC2TestCase(base.BaseTestCase):
return instances[0]
def get_instance_bdm(self, instance_id, device_name):
"""
device_name=None means getting bdm of root instance device
"""
# device_name=None means getting bdm of root instance device
instance = self.get_instance(instance_id)
if not device_name:
device_name = instance.get('RootDeviceName')

View File

@ -1,86 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import functools
import os
import testtools
import tempest.test
def skip(*args, **kwargs):
"""A decorator useful to skip tests with message."""
def decorator(f):
@functools.wraps(f)
def wrapper(*func_args, **func_kwargs):
if "bug" in kwargs:
msg = "Skipped until Bug %s is resolved." % kwargs["bug"]
else:
msg = kwargs["msg"]
raise testtools.TestCase.skipException(msg)
return wrapper
return decorator
class TestCasePreparationError(Exception):
def __init__(self, msg="Error in test case preparation"):
self.msg = msg
def __str__(self):
return self.msg
class BaseTest(tempest.test.BaseTestCase):
"""Base class for Cloudscaling tests"""
pass
class BaseBenchmarkTest(BaseTest):
"""Base class for Cloudscaling tests"""
@classmethod
def _load_benchmark_data(cls, class_name):
cfg = cls.config.cloudscaling
if not cfg.benchmark_data:
return None
config = ConfigParser.ConfigParser()
f = open(os.path.expanduser(cfg.benchmark_data))
config.readfp(f)
f.close()
items = config.items(class_name)
result_items = {}
for item in items:
boundaries = item[1].split("-")
if len(boundaries) == 2:
result_items[item[0]] = (boundaries[0], boundaries[1])
cls.benchmark_data = result_items
def _get_benchmark_data(self):
return self.benchmark_data
def _get_benchmark_result(self, result_name=None):
if not hasattr(self, 'benchmark_data'):
return None
key = self._testMethodName.lower()
if result_name is not None:
key += "." + result_name
if key in self.benchmark_data:
return self.benchmark_data[key]
return None

View File

@ -1,361 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
from boto import exception as boto_exception
import netaddr
from tempest import auth
from tempest import clients as base_clients
from tempest.cloudscaling import base
from tempest.cloudscaling.thirdparty.scenario.aws_compat import clients
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest import test as base_test
from tempest.thirdparty.boto import test
from tempest.thirdparty.boto.utils import wait as boto_wait
VOLUME_SIZE = 1
class BaseAWSTest(base.BaseTest, test.BotoTestCase):
"""Base class for AWS compat Cloudscaling tests"""
@classmethod
def setUpClass(cls):
super(BaseAWSTest, cls).setUpClass()
cls.os = clients.Manager()
cls.ec2_client = cls.os.ec2api_client
cls.vpc_client = cls.os.vpc_client
cls.config = config.CONF
cls.instance_type = cls.config.boto.instance_type
@classmethod
def _processException(cls, exc):
if isinstance(exc, boto_exception.EC2ResponseError):
value = getattr(exc, "message", None)
if not value:
value = getattr(exc, "error_message", None)
msg = str(exc.error_code) + ": " + str(value)
return (base_test.TestResultLabel.ERROR, msg)
return super(BaseAWSTest, cls)._processException(exc)
@classmethod
def _prepare_image_id(cls, image_name):
"""Searches existing available image ID by given name pattern"""
images = cls.ec2_client.get_all_images(filters={
"name": image_name,
"image-type": "machine",
"is-public": "true"})
# NOTE(apavlov) There is no filtering in nova-api-ec2. Filter here.
filtered_images = []
for image in images:
if not fnmatch.fnmatch(image.name, image_name):
continue
if image.type != "machine":
continue
if not image.is_public:
continue
filtered_images.append(image)
if len(filtered_images) > 0:
return filtered_images[0].id
return image_name
@classmethod
def _prepare_key_pair(cls):
"""Key-pair preparation"""
keypair_name = data_utils.rand_name("keypair-")
keypair = cls.ec2_client.create_key_pair(keypair_name)
if keypair is None or keypair.name is None:
raise base.TestCasePreparationError("Can`t create keypair")
cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
keypair_name)
return keypair
@classmethod
def _prepare_security_group(cls):
"""Security-group preparation"""
sec_group_name = data_utils.rand_name("securitygroup-")
group_desc = sec_group_name + " security group description "
security_group = cls.ec2_client.create_security_group(
sec_group_name, group_desc)
if security_group is None or security_group.name is None:
raise base.TestCasePreparationError("Can't create security group")
cls.addResourceCleanUp(cls.destroy_security_group_wait,
security_group)
result = cls.ec2_client.authorize_security_group(
sec_group_name,
ip_protocol="icmp",
cidr_ip="0.0.0.0/0",
from_port=-1,
to_port=-1)
if not result:
raise base.TestCasePreparationError(
"Can`t authorize security group")
result = cls.ec2_client.authorize_security_group(
sec_group_name,
ip_protocol="tcp",
cidr_ip="0.0.0.0/0",
from_port=22,
to_port=22)
if not result:
raise base.TestCasePreparationError(
"Can`t authorize security group")
return security_group
@classmethod
def _destroy_security_group_wait(cls, group):
def _delete():
cls.ec2_client.delete_security_group(group_id=group.id)
boto_wait.wait_no_exception(_delete)
@classmethod
def _destroy_internet_gateway(cls, internet_gateway):
igs = cls.vpc_client.get_all_internet_gateways(
internet_gateway_ids=[internet_gateway.id])
if len(igs) == 0:
return
ig = igs[0]
for attachment in ig.attachments:
cls.vpc_client.detach_internet_gateway(ig.id, attachment.vpc_id)
cls.vpc_client.delete_internet_gateway(ig.id)
@classmethod
def _delete_subnet_wait(cls, subnet):
def _delete():
cls.vpc_client.delete_subnet(subnet.id)
boto_wait.wait_no_exception(_delete)
@classmethod
def _prepare_public_ip(cls, instance, network_interface_id=None):
"""Public IP preparation"""
ip_address = instance.ip_address
if ip_address is None or ip_address == instance.private_ip_address:
domain = "vpc" if instance.vpc_id is not None else None
address = cls.ec2_client.allocate_address(domain)
if address is None or not address.public_ip:
raise base.TestCasePreparationError(
"Can't allocate public IP")
if domain is None:
# NOTE(ft): this is temporary workaround for OS
# it must be removed after VPC integration
cls.addResourceCleanUp(address.delete)
status = address.associate(instance.id)
if not status:
raise base.TestCasePreparationError(
"Can't associate IP with instance")
cls.addResourceCleanUp(address.disassociate)
else:
cls.addResourceCleanUp(cls.ec2_client.release_address,
allocation_id=address.allocation_id)
if network_interface_id:
status = cls.ec2_client.associate_address(
allocation_id=address.allocation_id,
network_interface_id=network_interface_id)
else:
status = cls.ec2_client.associate_address(
instance.id, allocation_id=address.allocation_id)
if not status:
raise base.TestCasePreparationError(
"Can't associate IP with instance")
addresses = cls.ec2_client.get_all_addresses(
allocation_ids=[address.allocation_id])
if addresses is None or len(addresses) != 1:
raise base.TestCasePreparationError(
"Can't get address by allocation_id")
address = addresses[0]
cls.addResourceCleanUp(cls.ec2_client.disassociate_address,
association_id=address.association_id)
instance.update()
ip_address = address.public_ip
return ip_address
@classmethod
def _wait_instance_state(cls, instance, final_set):
if not isinstance(final_set, set):
final_set = set((final_set,))
final_set |= cls.gone_set
lfunction = cls.get_lfunction_gone(instance)
state = boto_wait.state_wait(lfunction, final_set,
cls.valid_instance_state)
if state not in final_set:
raise base.TestCasePreparationError("Error in waiting for "
"instance(state = '%s')" % state)
@classmethod
def _correct_ns_if_needed(cls, ssh):
try:
ssh.exec_command("host www.com")
except exceptions.SSHExecCommandFailed:
# NOTE(apavlov) update nameservers (mandatory for local devstack)
ssh.exec_command("sudo su -c 'echo nameserver 8.8.8.8 "
"> /etc/resolv.conf'")
ssh.exec_command("host www.com")
@classmethod
def _prepare_ebs_image(cls):
if cls.config.cloudscaling.ebs_image_id:
return cls.config.cloudscaling.ebs_image_id
if not cls.config.cloudscaling.image_id_ami:
raise cls.skipException("".join(("EC2 ", cls.__name__,
": requires image_id_ami setting")))
if not cls.config.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
if not cls.config.service_available.nova:
skip_msg = ("%s skipped as nova is not available" % cls.__name__)
raise cls.skipException(skip_msg)
admin_creds = auth.get_default_credentials('compute_admin')
os = base_clients.Manager(admin_creds, interface='json')
cls.os = os
cls.volumes_client = os.volumes_client
cls.servers_client = os.servers_client
cls.images_client = os.images_client
cls.snapshots_client = os.snapshots_client
# NOTE(apavlov): create volume
resp, volume = cls.volumes_client.create_volume(VOLUME_SIZE,
display_name="aws_volume")
assert 200 == resp.status
cls.addResourceCleanUp(cls._delete_volume, volume['id'])
cls.volumes_client.wait_for_volume_status(volume['id'], 'available')
# NOTE(apavlov): boot instance
bdm = [{
"volume_id": volume['id'],
"delete_on_termination": "1",
"device_name": "/dev/vda"}]
resp, server = cls.servers_client.create_server(
"aws_instance",
cls.config.cloudscaling.image_id_ami,
cls.config.compute.flavor_ref,
block_device_mapping=bdm)
assert 202 == resp.status
rc_server = cls.addResourceCleanUp(cls.servers_client.delete_server,
server['id'])
cls.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
# NOTE(apavlov): create image from instance
image_name = data_utils.rand_name("aws_ebs_image-")
resp, _ = cls.images_client.create_image(server['id'],
image_name)
assert 202 == resp.status
cls.image_id = resp["location"].split('/')[-1]
cls.addResourceCleanUp(cls.images_client.delete_image,
cls.image_id)
# NOTE(apavlov): delete instance
cls.cancelResourceCleanUp(rc_server)
cls.servers_client.delete_server(server['id'])
cls.servers_client.wait_for_server_termination(server['id'])
images = cls.ec2_client.get_all_images()
for image in images:
if image_name in image.location:
return image.id
raise base.TestCasePreparationError("Can't find ebs image.")
@classmethod
def _delete_volume(cls, volume_id):
resp, result = cls.snapshots_client.list_snapshots(
{"volume_id": volume_id})
if 200 == resp.status:
for snapshot in result:
cls.snapshots_client.delete_snapshot(snapshot['id'])
cls.snapshots_client.wait_for_resource_deletion(snapshot['id'])
cls.volumes_client.delete_volume(volume_id)
class BaseVPCTest(BaseAWSTest):
"""Base class for AWS VPC behavior tests."""
@classmethod
@base_test.safe_setup
def setUpClass(cls):
super(BaseVPCTest, cls).setUpClass()
cls.zone = cls.config.boto.aws_zone
cfg = cls.config.cloudscaling
cls.ssh_user = cfg.general_ssh_user_name
cls.vpc_cidr = netaddr.IPNetwork(cfg.vpc_cidr)
(cls.subnet_cidr,) = cls.vpc_cidr.subnet(cfg.vpc_subnet_prefix, 1)
cls.image_id = cls._prepare_image_id(cfg.general_image_name)
cls.keypair = cls._prepare_key_pair()
@classmethod
def _tune_vpc(cls, vpc):
ig = cls.vpc_client.create_internet_gateway()
if ig is None or not ig.id:
raise base.TestCasePreparationError()
cls.addResourceCleanUp(cls._destroy_internet_gateway, ig)
status = cls.vpc_client.attach_internet_gateway(ig.id, vpc.id)
if not status:
raise base.TestCasePreparationError()
rtables = cls.vpc_client.get_all_route_tables(
filters=[("vpc-id", vpc.id)])
if rtables is None or len(rtables) != 1:
raise base.TestCasePreparationError()
status = cls.vpc_client.create_route(rtables[0].id, "0.0.0.0/0",
gateway_id=ig.id)
if not status:
raise base.TestCasePreparationError()
secgroups = cls.vpc_client.get_all_security_groups(
filters={"vpc-id": vpc.id})
if secgroups is None or len(secgroups) != 1:
raise base.TestCasePreparationError()
status = cls.vpc_client.authorize_security_group(
group_id=secgroups[0].id, ip_protocol="-1",
from_port=-1, to_port=-1, cidr_ip="0.0.0.0/0")
if not status:
raise base.TestCasePreparationError()
@classmethod
def _prepare_vpc(cls, vpc_cidr, sn_cidr):
# NOTE(Alex) The following code is introduced for OpenStack
# and potentially requires fix in boto. See details in
# test_vpc_nat_scenario.
dhcp_opts = cls.vpc_client.create_dhcp_options(
domain_name_servers=['8.8.8.8'])
if dhcp_opts is None or not dhcp_opts.id:
raise base.TestCasePreparationError()
cls.addResourceCleanUp(cls.vpc_client.delete_dhcp_options,
dhcp_opts.id)
vpc = cls.vpc_client.create_vpc(str(vpc_cidr))
if vpc is None or not vpc.id:
raise base.TestCasePreparationError()
cls.addResourceCleanUp(cls.vpc_client.delete_vpc, vpc.id)
if not cls.vpc_client.associate_dhcp_options(dhcp_opts.id, vpc.id):
raise base.TestCasePreparationError()
cls._tune_vpc(vpc)
sn = cls.vpc_client.create_subnet(vpc.id, str(sn_cidr), cls.zone)
if sn is None or not sn.id:
raise base.TestCasePreparationError()
cls.addResourceCleanUp(cls._delete_subnet_wait, sn)
return sn

View File

@ -1,225 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
import tempest.cloudscaling.utils as utils
from tempest.lib.common.utils.linux import remote_client
from tempest import test
from tempest.thirdparty.boto.utils import wait as boto_wait
import logging
logging.getLogger('boto').setLevel(logging.CRITICAL)
LOG = logging.getLogger(__name__)
class InstanceMySQLTest(aws_base.BaseAWSTest):
"""
Test 'Running MySQL on Amazon' (http://aws.amazon.com/articles/1663)
"""
@classmethod
@test.safe_setup
def setUpClass(cls):
super(InstanceMySQLTest, cls).setUpClass()
cfg = cls.config.cloudscaling
image_name = cfg.mysql_image_name
cls.ssh_user = cfg.mysql_ssh_user_name
cls.volume_attach_name = "sdh"
cls.image_id = cls._prepare_image_id(image_name)
cls.keypair = cls._prepare_key_pair()
sg = cls._prepare_security_group()
cls.sec_group_name = sg.name
def test_integration_mysql(self):
"""Test based on http://aws.amazon.com/articles/1663"""
snapshot = self._run_scenario(self._create_mysql_db)
self._run_scenario(self._restore_mysql_db, snapshot=snapshot)
def _run_scenario(self, scenario_func, snapshot=None):
# NOTE(apavlov): ec2-run-instances --key KEYPAIR IMAGE
reservation = self.ec2_client.run_instances(self.image_id,
instance_type=self.instance_type,
key_name=self.keypair.name,
security_groups=(self.sec_group_name,))
self.addResourceCleanUp(self.destroy_reservation, reservation)
instance = reservation.instances[0]
LOG.info("state: %s", instance.state)
# NOTE(apavlov): wait until it runs (ec2-describe-instances INSTANCE)
if instance.state != "running":
self.assertInstanceStateWait(instance, "running")
# NOTE(apavlov): ec2-create-volume -z ZONE -s SIZE_GB
zone = instance.placement
volume = self.ec2_client.create_volume(1, zone, snapshot=snapshot)
self.addResourceCleanUp(self.destroy_volume_wait, volume)
# NOTE(apavlov): wait it (ec2-describe-volumes VOLUME)
self.assertVolumeStatusWait(volume, "available")
ip_address = self._prepare_public_ip(instance)
ssh = remote_client.RemoteClient(ip_address,
self.ssh_user,
pkey=self.keypair.material)
# NOTE(apavlov): ec2-attach-volume -d /dev/XXX -i INSTANCE VOLUME
# and wait until it will be available
part_lines = ssh.get_partitions().split('\n')
volume.attach(instance.id, "/dev/" + self.volume_attach_name)
def _volume_state():
volume.update(validate=True)
return volume.status
self.assertVolumeStatusWait(_volume_state, "in-use")
boto_wait.re_search_wait(_volume_state, "in-use")
def _part_state():
current = ssh.get_partitions().split('\n')
if len(current) > len(part_lines):
return 1
if len(current) < len(part_lines):
return -1
return 0
boto_wait.state_wait(_part_state, 1)
part_lines_new = ssh.get_partitions().split('\n')
self.volume_name = utils.detect_new_volume(part_lines, part_lines_new)
part_lines = part_lines_new
self._correct_ns_if_needed(ssh)
snapshot = scenario_func(ssh, volume.id)
# NOTE(apavlov): stop this instance(imagine that it will be used)
instance.stop()
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
return snapshot
def _create_mysql_db(self, ssh, volume_id):
ssh.exec_command("sudo apt-get update && sudo apt-get upgrade -fy")
# install mysql
ssh.exec_command("echo mysql-server-5.1 mysql-server/"
"root_password password rootpass | sudo debconf-set-selections"
"&& echo mysql-server-5.1 mysql-server/"
"root_password_again password rootpass "
"| sudo debconf-set-selections"
"&& echo mysql-server-5.1 mysql-server/"
"start_on_boot boolean true | sudo debconf-set-selections")
ssh.exec_command("sudo apt-get install -y xfsprogs mysql-server")
ssh.exec_command("grep -q xfs /proc/filesystems || sudo modprobe xfs")
ssh.exec_command("sudo mkfs.xfs /dev/" + self.volume_name)
ssh.exec_command("echo '/dev/" + self.volume_name
+ " /vol xfs noatime 0 0' "
"| sudo tee -a /etc/fstab")
ssh.exec_command("sudo mkdir -m 000 /vol && sudo mount /vol")
# NOTE(apavlov): Move the existing database files to the EBS volume.
ssh.exec_command("sudo /etc/init.d/mysql stop"
"&& sudo mkdir /vol/etc /vol/lib /vol/log"
"&& sudo mv /etc/mysql /vol/etc/"
"&& sudo mv /var/lib/mysql /vol/lib/"
"&& sudo mv /var/log/mysql /vol/log/")
ssh.exec_command("sudo mkdir /etc/mysql"
"&& sudo mkdir /var/lib/mysql"
"&& sudo mkdir /var/log/mysql")
ssh.exec_command("echo '/vol/etc/mysql /etc/mysql none bind' "
"| sudo tee -a /etc/fstab"
"&& sudo mount /etc/mysql")
ssh.exec_command("echo '/vol/lib/mysql /var/lib/mysql none bind' "
"| sudo tee -a /etc/fstab"
"&& sudo mount /var/lib/mysql")
ssh.exec_command("echo '/vol/log/mysql /var/log/mysql none bind' "
"| sudo tee -a /etc/fstab"
"&& sudo mount /var/log/mysql")
ssh.exec_command("sudo /etc/init.d/mysql start")
# NOTE(apavlov): add test DB
ssh.exec_command("mysql -u root --password=rootpass -e "
"'CREATE DATABASE tutorial_sample'")
resp = ssh.exec_command("mysql -u root --password=rootpass "
"-e 'SHOW DATABASES'")
self.assertIn("tutorial_sample", resp)
# NOTE(apavlov): make snapshot
ssh.exec_command("mysql -u root --password=rootpass -e '"
"FLUSH TABLES WITH READ LOCK;"
"SHOW MASTER STATUS;"
"SYSTEM sudo xfs_freeze -f /vol;'")
snapshot = self.ec2_client.create_snapshot(volume_id)
self.addResourceCleanUp(self.destroy_snapshot_wait, snapshot)
self.assertSnapshotStatusWait(snapshot, "completed")
ssh.exec_command("mysql -u root --password=rootpass -e '"
"SYSTEM sudo xfs_freeze -u /vol;"
"UNLOCK TABLES;'")
# NOTE(apavlov): cleanup
ssh.exec_command("sudo /etc/init.d/mysql stop"
"&& sudo umount /etc/mysql /var/lib/mysql /var/log/mysql /vol")
return snapshot
def _restore_mysql_db(self, ssh, volume_id):
    """Restore MySQL from an attached EBS data volume and verify it.

    Installs MySQL over ssh, mounts the data volume
    (``/dev/<self.volume_name>``) on /vol, repairs file ownership,
    bind-mounts the volume's etc/lib/log MySQL directories over the
    system locations, starts MySQL and checks that the
    ``tutorial_sample`` database is present. Finally stops MySQL and
    unmounts everything again.

    :param ssh: remote client connected to the instance
    :param volume_id: id of the restored volume (unused here; kept for
        interface symmetry with the snapshot-creation helper)
    :returns: None
    """
    ssh.exec_command("sudo apt-get update")
    ssh.exec_command("sudo apt-get upgrade -y")
    # install mysql
    # NOTE(review): the exported variable may not survive into the next
    # exec_command if each call opens a fresh shell session -- confirm.
    ssh.exec_command("export DEBIAN_FRONTEND=noninteractive")
    ssh.exec_command("sudo -E apt-get install -y xfsprogs mysql-server")
    # Register the data volume in fstab and mount it on /vol.
    ssh.exec_command("echo '/dev/%s /vol xfs noatime 0 0' "
                     "| sudo tee -a /etc/fstab" % self.volume_name)
    ssh.exec_command("sudo mkdir -m 000 /vol")
    ssh.exec_command("sudo mount /vol")
    # Restore ownership of the MySQL files that came from the snapshot.
    ssh.exec_command("sudo find /vol/{lib,log}/mysql/ ! -user root -print0"
                     " | sudo xargs -0 -r chown mysql")
    ssh.exec_command("sudo find /vol/{lib,log}/mysql/ ! -group root -a !"
                     " -group adm -print0 | sudo xargs -0 -r chgrp mysql")
    ssh.exec_command("sudo /etc/init.d/mysql stop")
    # Bind-mount the volume's config/data/log directories over the
    # standard system paths, persisting each bind in fstab.
    for bind_src, bind_dst in (("/vol/etc/mysql", "/etc/mysql"),
                               ("/vol/lib/mysql", "/var/lib/mysql"),
                               ("/vol/log/mysql", "/var/log/mysql")):
        ssh.exec_command("echo '%s %s none bind' "
                         "| sudo tee -a /etc/fstab" % (bind_src, bind_dst))
        ssh.exec_command("sudo mount %s" % bind_dst)
    ssh.exec_command("sudo /etc/init.d/mysql start")
    # Verify the restored database is visible.
    resp = ssh.exec_command("mysql -u root --password=rootpass "
                            "-e 'SHOW DATABASES'")
    self.assertIn("tutorial_sample", resp)
    # NOTE(apavlov): cleanup
    ssh.exec_command("sudo /etc/init.d/mysql stop"
                     "&& sudo umount /etc/mysql /var/lib/mysql"
                     " /var/log/mysql /vol")
    return None

View File

@ -1,110 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from testtools import content as test_content

from tempest import test
import tempest.cloudscaling.base as base
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
from tempest.lib.common.utils.linux import remote_client
from tempest.lib import decorators
logging.getLogger('boto').setLevel(logging.CRITICAL)
LOG = logging.getLogger(__name__)
class UnixBenchTest(base.BaseBenchmarkTest, aws_base.BaseAWSTest):
    """UnixBench set of tests used to test performance compatibility to AWS"""

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        # Boot a single instance from the configured image and keep an ssh
        # connection to it for the benchmark test below.
        super(UnixBenchTest, cls).setUpClass()
        cls._load_benchmark_data("UnixBenchTest")

        cfg = cls.config.cloudscaling
        image_name = cfg.general_image_name
        cls.ssh_user = cfg.general_ssh_user_name

        cls.image_id = cls._prepare_image_id(image_name)
        cls.keypair = cls._prepare_key_pair()
        sg = cls._prepare_security_group()
        cls.sec_group_name = sg.name

        # NOTE(apavlov): ec2-run-instances --key KEYPAIR IMAGE
        reservation = cls.ec2_client.run_instances(
            cls.image_id,
            instance_type=cls.instance_type,
            key_name=cls.keypair.name,
            security_groups=(cls.sec_group_name,))
        cls.addResourceCleanUp(cls.destroy_reservation, reservation)
        cls.instance = reservation.instances[0]
        LOG.info("state: %s", cls.instance.state)
        # NOTE(apavlov): wait until it runs (ec2-describe-instances INSTANCE)
        cls._wait_instance_state(cls.instance, "running")
        # Associate a public IP once and reuse it for the ssh client
        # (the original called _prepare_public_ip twice in a row).
        ip_address = cls._prepare_public_ip(cls.instance)
        cls.ssh = remote_client.RemoteClient(ip_address,
                                             cls.ssh_user,
                                             pkey=cls.keypair.material)

    @decorators.attr(type='benchmark')
    def test_run_benchmark(self):
        """Run UnixBench test on prepared instance"""
        if self.ssh is None:
            raise self.skipException("Booting failed")
        ssh = self.ssh

        self._correct_ns_if_needed(ssh)

        # Prepare build tools and the UnixBench sources on the instance.
        ssh.exec_command("sudo apt-get update && sudo apt-get upgrade -fy")
        ssh.exec_command("sudo apt-get update")
        ssh.exec_command("sudo apt-get install -y make gcc")
        ssh.exec_command("sudo apt-get install -y libx11-dev libgl1-mesa-dev "
                         "libxext-dev perl perl-modules")
        ssh.exec_command("wget http://byte-unixbench.googlecode.com/files"
                         "/UnixBench5.1.3.tgz")
        ssh.exec_command("tar xvf UnixBench5.1.3.tgz")
        resp = ssh.exec_command("cd UnixBench && ./Run")

        # Keep only the results table part of the benchmark output.
        i = resp.find("---------------")
        if i != -1:
            resp = resp[i:]
        resp = "zone: " + self.instance.placement + "\n" + resp

        # Compare each measured metric against the stored AWS reference
        # range; remember the first metric that falls below the minimum.
        fail = None
        reference = self._get_benchmark_data()
        # .items() instead of the Python-2-only .iteritems().
        for k, v in reference.items():
            i1 = resp.lower().find(k)
            if i1 == -1:
                continue
            k = resp[i1:i1 + len(k)]
            i2 = resp.find("\n", i1)
            outp = resp[i1 + len(k):i2].split()[:2]
            if len(outp) < 2:
                continue
            self.addDetail(k, test_content.text_content(
                outp[1] + "|" + outp[0] + "|Min: " + v[0] + "|Max: " + v[1]))
            if fail is None and float(outp[0]) < float(v[0]):
                fail = (outp[0], outp[1], k, v[0])
        if fail is not None:
            # fail is only set when the measured value is below the AWS
            # minimum, so report the failure directly. (The original
            # compared the value string against its unit string via
            # assertGreaterEqual(fail[0], fail[1]), which is meaningless.)
            self.fail(fail[2] + ": " +
                      fail[0] + " " + fail[1] + " (current) < " +
                      fail[3] + " " + fail[1] + " (AWS)")

View File

@ -1,250 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time

from testtools import content as test_content

from tempest import test
import tempest.cloudscaling.base as base
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
import tempest.cloudscaling.utils as utils
from tempest.lib.common.utils.linux import remote_client
from tempest.lib import decorators
from tempest.thirdparty.boto.utils import wait as boto_wait
logging.getLogger('boto').setLevel(logging.CRITICAL)
LOG = logging.getLogger(__name__)
class VolumeBenchmarkTest(base.BaseBenchmarkTest, aws_base.BaseAWSTest):
    """Benchmark EBS volume operations against stored AWS reference times."""

    class Context:
        # Mutable state shared between the ordered test_00x methods.
        instance = None
        ssh = None
        volume = None
        part_lines = None
        volume_ready = False
        volume_filled = False
        snapshot = None

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        # Boot one instance and keep an ssh connection to it; the ordered
        # tests below create/attach/fill/snapshot/detach a volume on it.
        super(VolumeBenchmarkTest, cls).setUpClass()
        cls._load_benchmark_data("VolumeBenchmarkTest")

        cfg = cls.config.cloudscaling
        image_name = cfg.general_image_name
        cls.ssh_user = cfg.general_ssh_user_name
        cls.volume_size = cfg.volume_benchmark_volume_size_gb
        cls.volume_fill = cfg.volume_benchmark_volume_fill_percent
        cls.volume_attach_name = "sdh"
        cls.ctx = cls.Context()

        cls.image_id = cls._prepare_image_id(image_name)
        cls.keypair = cls._prepare_key_pair()
        sg = cls._prepare_security_group()
        cls.sec_group_name = sg.name

        # NOTE(apavlov): ec2-run-instances --key KEYPAIR IMAGE
        reservation = cls.ec2_client.run_instances(
            cls.image_id,
            instance_type=cls.instance_type,
            key_name=cls.keypair.name,
            security_groups=(cls.sec_group_name,))
        cls.addResourceCleanUp(cls.destroy_reservation, reservation)
        instance = reservation.instances[0]
        LOG.info("state: %s", instance.state)
        # NOTE(apavlov): wait until it runs (ec2-describe-instances INSTANCE)
        cls._wait_instance_state(instance, "running")
        cls.ctx.instance = instance

        ip_address = cls._prepare_public_ip(instance)
        ssh = remote_client.RemoteClient(ip_address,
                                         cls.ssh_user,
                                         pkey=cls.keypair.material)
        cls.ctx.ssh = ssh

    def _volume_state(self):
        # Refresh and return the current EC2 status string of the volume.
        self.ctx.volume.update(validate=True)
        return self.ctx.volume.status

    def _part_state(self):
        # Compare the guest's current partition list with the stored one:
        # 1 = a partition appeared, -1 = one disappeared, 0 = no change.
        current = self.ctx.ssh.get_partitions().split('\n')
        if len(current) > len(self.ctx.part_lines):
            return 1
        if len(current) < len(self.ctx.part_lines):
            return -1
        return 0

    def _start_test(self):
        # Remember the wall-clock start of the measured operation.
        self.start_time = time.time()

    def _end_test(self, detail_description):
        # Record the elapsed time and attach it (plus the AWS reference
        # range, when available) to the test details.
        end_time = time.time()
        self.test_time = end_time - self.start_time
        content = test_content.text_content(
            detail_description + " time: " + str(self.test_time) + "s")
        self.addDetail("Current", content)
        reference_time = self._get_benchmark_result()
        if reference_time is not None:
            content = test_content.text_content(
                "Min time: " + str(reference_time[0]) + "s, " +
                "Max time: " + str(reference_time[1]) + "s")
            self.addDetail("AWS", content)

    def _check_test(self):
        # Fail when the measured time exceeds the AWS reference maximum.
        reference_time = self._get_benchmark_result()
        if reference_time is not None:
            self.assertLessEqual(self.test_time, float(reference_time[1]),
                                 str(self.test_time) + "s (current) > " +
                                 reference_time[1] + "s (AWS)")

    @decorators.attr(type='benchmark')
    def test_001_attach_volume(self):
        """Attach volume"""
        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        self._start_test()

        # NOTE(apavlov): ec2-create-volume -z ZONE -s SIZE_GB
        zone = self.ctx.instance.placement
        volume = self.ec2_client.create_volume(self.volume_size, zone)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        self.ctx.volume = volume
        # NOTE(apavlov): wait it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(volume, "available")

        # NOTE(apavlov): ec2-attach-volume -d /dev/XXX -i INSTANCE VOLUME
        # and wait until it will be available
        self.ctx.part_lines = self.ctx.ssh.get_partitions().split('\n')
        volume.attach(self.ctx.instance.id, "/dev/" + self.volume_attach_name)
        # NOTE(apavlov): "attaching" invalid EC2 status #1074901
        self.assertVolumeStatusWait(self._volume_state, "in-use")
        boto_wait.re_search_wait(self._volume_state, "in-use")

        # Wait for the new partition to appear in the guest and detect
        # which device name it got.
        boto_wait.state_wait(self._part_state, 1)
        part_lines_new = self.ctx.ssh.get_partitions().split('\n')
        volume_name = utils.detect_new_volume(self.ctx.part_lines,
                                              part_lines_new)
        self.ctx.part_lines = part_lines_new

        self._end_test("Create and attach volume")

        self.ctx.ssh.exec_command("PATH=$PATH:/usr/sbin:/usr/bin "
                                  "&& sudo mkfs.ext3 /dev/" + volume_name)
        self.ctx.ssh.exec_command("sudo mkdir -m 777 /vol "
                                  "&& sudo mount /dev/" + volume_name +
                                  " /vol")
        self.ctx.volume_ready = True

        self._check_test()

    @decorators.attr(type='benchmark')
    def test_002_fill_volume(self):
        """Fill volume with data"""
        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if not self.ctx.volume_ready:
            raise self.skipException("Volume preparation failed")
        self._start_test()

        self.ctx.ssh.exec_command("sudo mkdir -m 777 /vol/data")
        file_lines = 102 * int(self.volume_size)
        # range() instead of the Python-2-only xrange(); one file is
        # written per percent of the requested fill level.
        for i in range(int(self.volume_fill)):
            self.ctx.ssh.exec_command("cat /dev/urandom "
                                      "| tr -d -c 'a-zA-Z0-9' "
                                      "| fold -w 1020 "
                                      "| head -n " + str(file_lines) +
                                      " > /vol/data/file" + str(i))

        self._end_test("Volume filling")
        self.ctx.volume_filled = True
        self._check_test()

    @decorators.attr(type='benchmark')
    def test_003_snapshot_volume(self):
        """Snapshot volume"""
        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if not self.ctx.volume_filled:
            raise self.skipException("Volume filling failed")
        self._start_test()

        snapshot = self.ec2_client.create_snapshot(self.ctx.volume.id)
        self.addResourceCleanUp(self.destroy_snapshot_wait, snapshot)
        self.assertSnapshotStatusWait(snapshot, "completed")

        self._end_test("Snapshot creation")
        self.ctx.snapshot = snapshot
        self._check_test()

    @decorators.attr(type='benchmark')
    def test_004_clone_volume_snapshot(self):
        """Clone volume"""
        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if self.ctx.snapshot is None:
            raise self.skipException("Snapshot of volume failed")
        self._start_test()

        zone = self.ctx.instance.placement
        volume2 = self.ec2_client.create_volume(
            self.volume_size, zone, snapshot=self.ctx.snapshot)
        self.addResourceCleanUp(self.destroy_volume_wait, volume2)
        # NOTE(apavlov): wait it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(volume2, "available")

        self._end_test("Volume creation by snapshot")
        self._check_test()

    @decorators.attr(type='benchmark')
    def test_005_detach_volume(self):
        """Detach volume"""
        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if not self.ctx.volume_ready:
            raise self.skipException("Volume preparation failed")
        self._start_test()

        self.ctx.ssh.exec_command("sudo umount /vol")
        self.ctx.volume.detach()
        # NOTE(apavlov): "detaching" invalid EC2 status #1074901
        self.assertVolumeStatusWait(self._volume_state, "available")
        boto_wait.re_search_wait(self._volume_state, "available")

        self._end_test("Detach volume")

        boto_wait.state_wait(self._part_state, -1)
        self._check_test()

View File

@ -1,297 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import boto.exception
import netaddr
from tempest.cloudscaling import base
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
from tempest.lib.common.utils.linux import remote_client
from tempest import test
from tempest.thirdparty.boto.utils import wait as boto_wait
import logging
logging.getLogger('boto').setLevel(logging.CRITICAL)
class VPC_Behavior_Base(aws_base.BaseVPCTest):
    """Base class for AWS VPC behavior tests."""

    @classmethod
    def _run_instance(cls, subnet, private_ip=None):
        """Boot one instance in the given subnet and schedule its cleanup.

        :param subnet: subnet object the instance is placed into
        :param private_ip: optional fixed private IP address
        :returns: the single instance of the new reservation
        :raises base.TestCasePreparationError: when no reservation is
            returned or it does not hold exactly one instance
        """
        run_kwargs = dict(key_name=cls.keypair.name,
                          instance_type=cls.instance_type,
                          placement=cls.zone,
                          subnet_id=subnet.id)
        if private_ip:
            run_kwargs["private_ip_address"] = str(private_ip)
        reservation = cls.vpc_client.run_instances(cls.image_id,
                                                   **run_kwargs)
        if reservation is None:
            raise base.TestCasePreparationError()
        cls.addResourceCleanUp(cls.destroy_reservation, reservation)
        if len(reservation.instances) != 1:
            raise base.TestCasePreparationError()
        return reservation.instances[0]
class VPC_Behavior(VPC_Behavior_Base):
    """Test various behavior of VPC network."""

    class TcpDumpRunner(object):
        """Context manager running tcpdump on an instance in background."""
        # Seconds to wait for tcpdump start/stop; set from config in
        # setUpClass before first use.
        timeout = None

        def __init__(self, instance, ssh_user, ssh_keypair, parameters):
            ssh = remote_client.RemoteClient(instance.ip_address,
                                             ssh_user,
                                             pkey=ssh_keypair.material)
            ssh.ssh_client.channel_timeout = float(self.timeout)
            self.ssh = ssh
            self.parameters = parameters
            self.thread = None

        def __enter__(self):
            # Start tcpdump in a background thread and wait until its log
            # file exists, so capture is known to be running.
            self.ssh.exec_command("rm -f tcpdump.log")
            thread = threading.Thread(target=self._run_tcpdump)
            thread.start()
            self._sync()
            self.thread = thread
            return self

        def __exit__(self, ex_type, ex_value, ex_traceback):
            self.stop()

        def _run_tcpdump(self):
            self.ssh.exec_command("sudo tcpdump %s >tcpdump.log 2>&1" %
                                  self.parameters)

        def _sync(self):
            # Block until tcpdump has created its log file.
            def check_tcpdump_is_ready():
                resp = self.ssh.exec_command("test -f tcpdump.log && echo 1 "
                                             "|| echo 0")
                return int(resp) == 1
            boto_wait.state_wait(check_tcpdump_is_ready, True)

        def stop(self):
            # Interrupt tcpdump and join the background thread; True when
            # the thread finished within the timeout, None if not running.
            if self.thread is None:
                return
            self.ssh.exec_command("sudo pkill -SIGINT tcpdump")
            thread = self.thread
            self.thread = None
            thread.join(float(self.timeout))
            return not thread.is_alive()

        def get_result(self):
            resp = self.ssh.exec_command("cat tcpdump.log")
            return resp

    class Context(object):
        # State shared between the ordered test_0xx methods.
        instance3 = None
        lease_file = None
        gateway = None

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        super(VPC_Behavior, cls).setUpClass()
        cls.TcpDumpRunner.timeout = cls.config.boto.build_timeout
        cls.subnet = cls._prepare_vpc(cls.vpc_cidr, cls.subnet_cidr)
        cls.instance1 = cls._run_instance(cls.subnet)
        cls.instance2 = cls._run_instance(cls.subnet)
        cls._wait_instance_state(cls.instance1, "running")
        cls._wait_instance_state(cls.instance2, "running")
        cls.instance1.ip_address = cls._prepare_public_ip(cls.instance1)
        ssh = remote_client.RemoteClient(cls.instance1.ip_address,
                                         cls.ssh_user,
                                         pkey=cls.keypair.material)
        # socat is needed for the broadcast/multicast tests, nmap for the
        # gateway port scan.
        ssh.exec_command("sudo apt-get update")
        ssh.exec_command("sudo DEBIAN_FRONTEND=noninteractive apt-get -fqy "
                         "install socat nmap")
        cls.ctx = cls.Context()

    def test_011_check_network_gateway(self):
        """Is gateway local to subnet?"""
        ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        # Extract the default-route gateway address from the routing table.
        resp = ssh.exec_command("route -n | awk '{ if ($1==\"0.0.0.0\" && "
                                "$4 ~ /.*G.*/) print $2 }'")
        lines = resp.splitlines()
        self.assertEqual(1, len(lines))
        gateway = netaddr.IPAddress(lines[0])
        self.ctx.gateway = gateway
        self.assertTrue(gateway in self.subnet_cidr)

    def test_012_check_dhcp_grant_ip(self):
        """Whether dhcp provide IP address?"""
        instance = self._run_instance(self.subnet)
        state = self.waitInstanceState(instance, "running")
        if state != "running":
            raise base.TestCasePreparationError()
        self.assertTrue(instance.private_ip_address)
        instance.ip_address = self._prepare_public_ip(instance)
        self.ctx.instance3 = instance

    def test_013_check_dhcp_lease(self):
        """Whether IP address was obtained by dhcp?"""
        if self.ctx.instance3 is None:
            self.skipTest("Instance 3 was not initialized")
        ssh = remote_client.RemoteClient(self.ctx.instance3.ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        resp = ssh.exec_command("ps -eo comm,args | grep -m 1 dhclient")
        args = resp.split()
        if len(args) <= 2 or not args[0].startswith('dhclient'):
            raise base.TestCasePreparationError()
        # Determine the lease file: the -lf argument of dhclient if
        # present, otherwise the default path.
        is_lf = False
        lease_file = "/var/lib/dhcp/dhclient.leases"
        for arg in args:
            if is_lf:
                lease_file = arg
                is_lf = False
            elif arg == "-lf":
                is_lf = True
        resp = ssh.exec_command("test -f %s && echo 1 || echo 0" % lease_file)
        self.assertEqual(1, int(resp))
        self.ctx.lease_file = lease_file
        # The last granted lease must match the instance's private IP.
        resp = ssh.exec_command("grep 'fixed-address ' %s | tail -n 1 | "
                                "awk '{ print $2 }' | sed -e 's/;//'" %
                                lease_file)
        lines = resp.splitlines()
        self.assertEqual(1, len(lines))
        self.assertEqual(self.ctx.instance3.private_ip_address, lines[0])
        # The lease must still be active (renew time in the future).
        date = ssh.exec_command("date -u +%Y/%m/%d%H:%M:%S")
        self.assertTrue(date)
        resp = ssh.exec_command("grep 'renew ' %s | tail -n 1 | "
                                "awk '{ print $3$4 }' | sed -e 's/;//'" %
                                lease_file)
        self.assertLess(date, resp)

    def test_014_check_dhcp_sends_mtu_size(self):
        """Check DHCP sends MTU size."""
        if self.ctx.lease_file is None:
            self.skipTest("Dhcp lease file was not found")
        ssh = remote_client.RemoteClient(self.ctx.instance3.ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        resp = ssh.exec_command("grep 'option interface-mtu ' %s" %
                                self.ctx.lease_file)
        self.assertLess(0, len(resp.splitlines()))

    def test_015_check_dhcp_distribute_host_name_size(self):
        """Check DHCP distributes host hame."""
        if self.ctx.lease_file is None:
            self.skipTest("Dhcp lease file was not found")
        ssh = remote_client.RemoteClient(self.ctx.instance3.ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        resp = ssh.exec_command("grep 'option host-name ' %s" %
                                self.ctx.lease_file)
        self.assertLess(0, len(resp.splitlines()))

    def test_021_check_traffic_visibility(self):
        """Are other VMs visible?"""
        if self.ctx.instance3 is None:
            self.skipTest("Instance 3 was not initialized")
        with self.TcpDumpRunner(self.ctx.instance3,
                                self.ssh_user,
                                self.keypair,
                                "ip proto \\\\icmp") as tdump:
            ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                             self.ssh_user,
                                             pkey=self.keypair.material)
            ssh.exec_command("ping -c 1 %s" %
                             self.instance2.private_ip_address)
            if not tdump.stop():
                raise base.TestCasePreparationError()
            resp = tdump.get_result()
            # Initialize before the search loop: the original referenced
            # 'captured' without assignment (NameError when tcpdump printed
            # no "packets captured" line); siblings initialize it to "".
            captured = ""
            for line in resp.splitlines():
                if line.endswith("packets captured"):
                    captured = line
                    break
            tokens = captured.split()
            packets = int(tokens[0])
            self.assertEqual(0, packets)

    def test_022_check_broadcast_visible(self):
        """Is broadcast traffic visible?"""
        if self.ctx.instance3 is None:
            self.skipTest("Instance 3 was not initialized")
        with self.TcpDumpRunner(self.ctx.instance3,
                                self.ssh_user,
                                self.keypair,
                                "ip broadcast") as tdump:
            ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                             self.ssh_user,
                                             pkey=self.keypair.material)
            ssh.exec_command("echo ping |"
                             "socat - UDP4-DATAGRAM:255.255.255.255:6666,"
                             "broadcast")
            if not tdump.stop():
                raise base.TestCasePreparationError()
            resp = tdump.get_result()
            captured = ""
            for line in resp.splitlines():
                if line.endswith(" captured"):
                    captured = line
                    break
            tokens = captured.split()
            packets = int(tokens[0])
            self.assertEqual(0, packets)

    def test_023_check_multicast_visible(self):
        """Is multicast traffic visible?"""
        if self.ctx.instance3 is None:
            self.skipTest("Instance 3 was not initialized")
        with self.TcpDumpRunner(self.ctx.instance3,
                                self.ssh_user,
                                self.keypair,
                                "ip multicast") as tdump:
            ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                             self.ssh_user,
                                             pkey=self.keypair.material)
            ssh.exec_command("echo ping |"
                             "socat - UDP4-DATAGRAM:239.1.1.1:6666")
            if not tdump.stop():
                raise base.TestCasePreparationError()
            resp = tdump.get_result()
            captured = ""
            for line in resp.splitlines():
                if line.endswith(" captured"):
                    captured = line
                    break
            tokens = captured.split()
            packets = int(tokens[0])
            self.assertEqual(0, packets)

    def test_031_scan_gateway_ports(self):
        """Are gateway ports closed?"""
        if self.ctx.gateway is None:
            self.skipTest("Subnet's gateway was not found")
        ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        # A full nmap scan can be slow; extend the channel timeout.
        ssh.ssh_client.channel_timeout = 600
        resp = ssh.exec_command("sudo nmap -PN %s" % str(self.ctx.gateway))
        all_closed_msg = ("All 1000 scanned ports on %s are " %
                          str(self.ctx.gateway))
        for line in resp.splitlines():
            if line.startswith(all_closed_msg):
                return
        self.fail("Some gateway ports are open")

View File

@ -1,188 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import select

from testtools import content as test_content

from tempest import exceptions
from tempest import test
from tempest.cloudscaling import base
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
from tempest.lib.common.utils.linux import remote_client
from tempest.lib import decorators
logging.getLogger('boto').setLevel(logging.CRITICAL)
class IPerfServer(object):
    """Wrapper to use iperf server in tests."""
    cmd = "iperf -s"

    def __init__(self, instance, ssh_user, ssh_keypair):
        self.instance = instance
        self.ssh_user = ssh_user
        self.ssh_keypair = ssh_keypair

    def __enter__(self):
        # NOTE(ft): Iperf doesn't close stdout in server mode
        # but standard exec_command waits for it
        # so instead of use it we waits for some string in output
        ssh = remote_client.RemoteClient(self.instance.ip_address,
                                         self.ssh_user,
                                         pkey=self.ssh_keypair.material)
        ssh_conn = ssh.ssh_client._get_ssh_connection()
        chan = ssh_conn.get_transport().open_session()
        chan.get_pty()  # NOTE(ft): to stop iperf with session end
        chan.fileno()
        chan.exec_command(self.cmd)

        started = False
        out_data = []
        err_data = []
        select_params = [chan], [], [], ssh.ssh_client.channel_timeout
        while True:
            ready = select.select(*select_params)
            if not any(ready):
                # Fix: the original formatted "{1}" with a single argument
                # and read the nonexistent attribute self.host, so an
                # IndexError/AttributeError was raised instead of the
                # intended timeout error.
                raise exceptions.TimeoutException(
                    "Cannot start iperf server on host '{0}'.".format(
                        self.instance.ip_address))
            if not ready[0]:
                continue
            out_chunk = err_chunk = None
            # Drain whatever stdout/stderr data is available on the channel.
            if chan.recv_ready():
                out_chunk = chan.recv(ssh.ssh_client.buf_size)
                out_data += out_chunk,
            if chan.recv_stderr_ready():
                err_chunk = chan.recv_stderr(ssh.ssh_client.buf_size)
                err_data += err_chunk,
            if chan.exit_status_ready():
                exit_status = chan.recv_exit_status()
                if 0 != exit_status or len(err_data) > 0:
                    raise exceptions.SSHExecCommandFailed(
                        command=self.cmd, exit_status=exit_status,
                        strerror=''.join(err_data))
            # iperf announces readiness with a "Server listening" line.
            lines = ''.join(out_data).splitlines()
            for line in lines:
                if line.startswith("Server listening"):
                    started = True
                    break
            if (started or
                    chan.closed and not err_chunk and not out_chunk):
                break
        self.ssh = ssh
        self.ssh_conn = ssh_conn
        self.chan = chan

    def __exit__(self, ex_type, ex_value, ex_traceback):
        # Closing the pty session terminates the remote iperf process.
        self.chan.close()
        self.ssh_conn.close()
class VPC_Benchmark(aws_base.BaseVPCTest, base.BaseBenchmarkTest):
    """Benchmark VPC network throughput."""

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        # Boot two instances in one VPC subnet and install iperf on both;
        # the tests measure throughput between them.
        super(VPC_Benchmark, cls).setUpClass()
        cls.keypair = cls._prepare_key_pair()
        subnet = cls._prepare_vpc(cls.vpc_cidr, cls.subnet_cidr)

        reservation = cls.vpc_client.run_instances(
            cls.image_id,
            min_count=2, max_count=2,
            key_name=cls.keypair.name,
            instance_type=cls.instance_type,
            placement=cls.zone,
            subnet_id=subnet.id)
        if reservation is None:
            raise base.TestCasePreparationError()
        cls.addResourceCleanUp(cls.destroy_reservation, reservation)
        if len(reservation.instances) != 2:
            raise base.TestCasePreparationError()
        cls.instance1 = reservation.instances[0]
        cls.instance2 = reservation.instances[1]
        cls._wait_instance_state(cls.instance1, "running")
        cls._wait_instance_state(cls.instance2, "running")
        cls._prepare_public_ip(cls.instance1)
        cls._prepare_public_ip(cls.instance2)

        def install_iperf(instance):
            try:
                ssh = remote_client.RemoteClient(instance.ip_address,
                                                 cls.ssh_user,
                                                 pkey=cls.keypair.material)
            except exceptions.SSHTimeout:
                raise base.TestCasePreparationError()
            ssh.exec_command("sudo apt-get update && sudo apt-get upgrade -y")
            ssh.exec_command("sudo apt-get update")
            # -y keeps the unattended install from hanging on the apt
            # confirmation prompt (the original omitted it, unlike the
            # sibling test files).
            ssh.exec_command("sudo apt-get install -y iperf")
        install_iperf(cls.instance1)
        install_iperf(cls.instance2)

        cfg = cls.config.cloudscaling
        cls.network_performance_class = cfg.network_performance_class
        cls._load_benchmark_data("AWS_VPC_Benchmark")

    def _get_rate(self, resp):
        """Extract the bits/sec field of iperf CSV output as Mbits/sec."""
        resp_items = resp.split(",")
        rate = resp_items[-1]
        # Floor division keeps the original Python 2 int/int semantics
        # when running on Python 3.
        return int(rate) // 1000000

    def _check_test(self, rate):
        # Compare the measured rate against the stored AWS reference for
        # the configured network performance class, when one is set.
        if not self.network_performance_class:
            return
        reference = self._get_benchmark_result(self.network_performance_class)
        if reference is not None:
            content = test_content.text_content(
                "Min rate: %sMbits/sec, Max rate: %sMBits/sec" %
                (reference[0], reference[1]))
            self.addDetail("AWS", content)
            self.assertGreaterEqual(rate, float(reference[0]),
                                    "%sMbits/sec (current) < %sMbits/sec "
                                    "(AWS)" % (rate, reference[0]))

    @decorators.attr(type='benchmark')
    def test_001_internal_vpc_tcp_150MB_throughput(self):
        """Measure internal VPC network throughput for 150 MBytes transmit."""
        if self.keypair is None:
            self.skipTest("Environment was not initialized")
        with IPerfServer(self.instance1, self.ssh_user, self.keypair):
            ssh = remote_client.RemoteClient(self.instance2.ip_address,
                                             self.ssh_user,
                                             pkey=self.keypair.material)
            resp = ssh.exec_command("iperf -c %s -n 150M -x CMSV -y C" %
                                    self.instance1.private_ip_address)
            rate = self._get_rate(resp)
            self.addDetail("Current", test_content.text_content(
                "150 MBytes throughput: %s Mbits/sec" % rate))
            self._check_test(rate)

    @decorators.attr(type='benchmark')
    def test_002_internal_vpc_tcp_2mins_throughput(self):
        """Measure internal VPC network throughput for 2 mins transmit."""
        if self.keypair is None:
            self.skipTest("Environment was not initialized")
        with IPerfServer(self.instance1, self.ssh_user, self.keypair):
            ssh = remote_client.RemoteClient(self.instance2.ip_address,
                                             self.ssh_user,
                                             pkey=self.keypair.material)
            # The transmit runs 120s; extend the channel timeout past it.
            ssh.ssh_client.channel_timeout = 130
            resp = ssh.exec_command("iperf -c %s -t 120 -x CMSV -y C" %
                                    self.instance1.private_ip_address)
            rate = self._get_rate(resp)
            self.addDetail("Current", test_content.text_content(
                "2 mins throughput: %s Mbits/sec" % rate))
            self._check_test(rate)

View File

@ -1,451 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from boto.ec2 import networkinterface
import netaddr
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils.linux import remote_client
from tempest import test
import logging
logging.getLogger('boto').setLevel(logging.CRITICAL)
class VPC_NAT_Scenario(aws_base.BaseAWSTest):
"""
Based on 'VPC with Public and Private Subnets' scenario
(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Scenario2.html)
Adapted to work with OpenStack with the following differences:
1. DNS is set up via DHCP options to 8.8.8.8 (boto has a bug, see Note).
2. Opened DNS ports (53) in DB and NAT security groups.
3. NAT instance is created with 2 interfaces in different subnets.
4. SourceDestCheck is disabled for second interface of NAT instance.
5. Default route in main route table is set to point to this interface.
As a result, DB instance's default route goes through the second interface
of NAT instance which is in the same subnet as the DB instance.
To allow several private subnets to work through the same NAT, more
secondary interfaces should be added to NAT instance for all of that
subnets, and separate route tables should be created for each of the
subnets.
"""
# NOTE(Alex) At the moment of this test's creation, boto has a bug with
# parsing result of setting up DHCP options. Order of the Key-Values
# returned in OpenStack for the Dictionaries is different from AWS's.
# Potential fix should be done in boto/vpc/dhcpoptions.py:
# DhcpConfigSet can be changed to:
#
# class DhcpConfigSet(dict):
#
# def startElement(self, name, attrs, connection):
# if name == 'valueSet':
# if not hasattr(self, '_value'):
# self._value = DhcpValueSet()
# return self._value
#
# def endElement(self, name, value, connection):
# if name == 'valueSet':
# if hasattr(self, '_name'):
# self[self._name] = self._value
# if name == 'key':
# self._name = value
# if hasattr(self, '_value'):
# self[self._name] = self._value
class Context(object):
vpc = None
internet_gateway = None
web_subnet = None
db_subnet = None
main_route_table = None
custom_route_table = None
web_security_group = None
nat_security_group = None
db_security_group = None
web_instance = None
db_instance = None
nat_instance = None
@classmethod
@test.safe_setup
def setUpClass(cls):
super(VPC_NAT_Scenario, cls).setUpClass()
cls.ctx = cls.Context()
cls.zone = cls.config.boto.aws_zone
cfg = cls.config.cloudscaling
cls.ssh_user = cfg.general_ssh_user_name
cls.vpc_cidr = netaddr.IPNetwork(cfg.vpc_cidr)
cls.web_subnet, cls.db_subnet = cls.vpc_cidr.subnet(
cfg.vpc_subnet_prefix, 2)
cls.test_client_cidr = netaddr.IPNetwork(cfg.test_client_cidr)
cls.image_id = cls._prepare_image_id(cfg.general_image_name)
cls.keypair = cls._prepare_key_pair()
@classmethod
def tearDownClass(cls):
if cls.ctx is not None:
for group in [cls.ctx.web_security_group,
cls.ctx.nat_security_group,
cls.ctx.db_security_group]:
if not group:
continue
try:
cls._revoke_security_group_linked_rules(group)
except Exception:
pass
super(VPC_NAT_Scenario, cls).tearDownClass()
@classmethod
def _revoke_security_group_linked_rules(cls, group):
groups = cls.vpc_client.get_all_security_groups(group_ids=[group.id])
if len(groups) == 0:
return
sg = groups[0]
for rule in sg.rules:
for grant in rule.grants:
if not grant.cidr_ip:
cls.vpc_client.revoke_security_group(
group_id=sg.id,
ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_security_group_group_id=grant.groupId)
for rule in sg.rules_egress:
for grant in rule.grants:
if not grant.cidr_ip:
cls.vpc_client.revoke_security_group_egress(
sg.id,
rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group_id=grant.groupId)
def test_000_create_vpc(self):
    """Create VPC"""
    # Custom DHCP options point instances at a public DNS server.
    dhcp_opts = self.vpc_client.create_dhcp_options(
        domain_name_servers=['8.8.8.8'])
    self.assertIsNotNone(dhcp_opts)
    self.assertTrue(dhcp_opts.id)
    self.addResourceCleanUp(self.vpc_client.delete_dhcp_options,
                            dhcp_opts.id)
    vpc = self.vpc_client.create_vpc(str(self.vpc_cidr))
    self.assertIsNotNone(vpc)
    self.assertTrue(vpc.id)
    self.addResourceCleanUp(self.vpc_client.delete_vpc, vpc.id)
    self.assertTrue(self.vpc_client.associate_dhcp_options(dhcp_opts.id,
                                                           vpc.id))
    # Stash the VPC for the following ordered steps.
    self.ctx.vpc = vpc
def test_001_create_internet_gateway(self):
    """Create internet gateway"""
    ig = self.vpc_client.create_internet_gateway()
    self.assertIsNotNone(ig)
    self.assertTrue(ig.id)
    self.addResourceCleanUp(self._destroy_internet_gateway, ig)
    # Attach the gateway to the VPC created in the previous step.
    status = self.vpc_client.attach_internet_gateway(ig.id,
                                                     self.ctx.vpc.id)
    self.assertTrue(status)
    self.ctx.internet_gateway = ig
def test_010_create_subnets(self):
    """Create subnets"""
    # Public (web) subnet.
    sn = self.vpc_client.create_subnet(self.ctx.vpc.id,
                                       str(self.web_subnet),
                                       self.zone)
    self.assertIsNotNone(sn)
    self.assertTrue(sn.id)
    self.addResourceCleanUp(self.vpc_client.delete_subnet, sn.id)
    self.ctx.web_subnet = sn
    # Private (db) subnet.
    sn = self.vpc_client.create_subnet(self.ctx.vpc.id,
                                       str(self.db_subnet),
                                       self.zone)
    self.assertIsNotNone(sn)
    self.assertTrue(sn.id)
    self.addResourceCleanUp(self.vpc_client.delete_subnet, sn.id)
    self.ctx.db_subnet = sn
def test_020_get_main_route_table(self):
    """Describe auto created route table"""
    rtables = self.vpc_client.get_all_route_tables(
        filters=[("vpc-id", self.ctx.vpc.id)])
    self.assertIsNotNone(rtables)
    # Exactly one (the main) route table is created together with a VPC.
    self.assertEqual(1, len(rtables))
    self.ctx.main_route_table = rtables[0]
def test_025_create_custom_route_table(self):
    """Create route table for web servers"""
    rtable = self.vpc_client.create_route_table(self.ctx.vpc.id)
    self.assertIsNotNone(rtable)
    self.assertTrue(rtable.id)
    self.addResourceCleanUp(self.vpc_client.delete_route_table, rtable.id)
    # Default-route the web subnet's traffic through the internet gateway.
    ig = self.ctx.internet_gateway
    status = self.vpc_client.create_route(rtable.id, "0.0.0.0/0",
                                          gateway_id=ig.id)
    self.assertTrue(status)
    association_id = self.vpc_client.associate_route_table(
        rtable.id, self.ctx.web_subnet.id)
    self.assertTrue(association_id)
    self.addResourceCleanUp(self.vpc_client.disassociate_route_table,
                            association_id)
    self.ctx.custom_route_table = rtable
def test_050_create_security_groups(self):
    """Create and tune security groups"""
    # Create the three groups first so the cross-referencing rules
    # below can mention each other's ids.
    sg = self.vpc_client.create_security_group(
        data_utils.rand_name("WebServerSG-"),
        data_utils.rand_name("description "),
        self.ctx.vpc.id)
    self.assertIsNotNone(sg)
    self.assertTrue(sg.id)
    self.addResourceCleanUp(self._destroy_security_group_wait, sg)
    self.ctx.web_security_group = sg
    sg = self.vpc_client.create_security_group(
        data_utils.rand_name("NATSG-"),
        data_utils.rand_name("description "),
        self.ctx.vpc.id)
    self.assertIsNotNone(sg)
    self.assertTrue(sg.id)
    self.addResourceCleanUp(self._destroy_security_group_wait, sg)
    self.ctx.nat_security_group = sg
    sg = self.vpc_client.create_security_group(
        data_utils.rand_name("DBServerSG-"),
        data_utils.rand_name("description "),
        self.ctx.vpc.id)
    self.assertIsNotNone(sg)
    self.assertTrue(sg.id)
    self.addResourceCleanUp(self._destroy_security_group_wait, sg)
    self.ctx.db_security_group = sg
    # Web servers: drop the default allow-all egress, then open only
    # SQL ports towards the db group plus what the tests need.
    sg = self.ctx.web_security_group
    status = self.vpc_client.revoke_security_group_egress(
        sg.id, "-1", cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 1433, 1433,
        src_group_id=self.ctx.db_security_group.id)
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 3306, 3306,
        src_group_id=self.ctx.db_security_group.id)
    self.assertTrue(status)
    # NOTE(ft): especially for connectivity test
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    # NOTE(ft): especially for connectivity test
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 22, 22,
        src_group_id=self.ctx.db_security_group.id)
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=80, to_port=80,
        cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=443, to_port=443,
        cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=22, to_port=22,
        cidr_ip=str(self.test_client_cidr))
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=3389,
        to_port=3389, cidr_ip=str(self.test_client_cidr))
    self.assertTrue(status)
    # NAT group: allow web/DNS traffic out and relayed web traffic from
    # the private (db) subnet in, plus SSH from the test client.
    sg = self.ctx.nat_security_group
    status = self.vpc_client.revoke_security_group_egress(
        sg.id, "-1", cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 443, 443, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 53, 53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "udp", 53, 53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=53,
        to_port=53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="udp", from_port=53,
        to_port=53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=80, to_port=80,
        cidr_ip=str(self.db_subnet))
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=443, to_port=443,
        cidr_ip=str(self.db_subnet))
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp", from_port=22, to_port=22,
        cidr_ip=str(self.test_client_cidr))
    self.assertTrue(status)
    # DB group: reachable on SQL ports from web servers only; its own
    # outbound web/DNS traffic will flow via the NAT instance.
    sg = self.ctx.db_security_group
    status = self.vpc_client.revoke_security_group_egress(
        sg.id, "-1", cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 443, 443, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "tcp", 53, 53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group_egress(
        sg.id, "udp", 53, 53, cidr_ip="0.0.0.0/0")
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp",
        from_port=1433,
        to_port=1433,
        src_security_group_group_id=self.ctx.web_security_group.id)
    self.assertTrue(status)
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp",
        from_port=3306,
        to_port=3306,
        src_security_group_group_id=self.ctx.web_security_group.id)
    self.assertTrue(status)
    # NOTE(ft): especially for connectivity test
    status = self.vpc_client.authorize_security_group(
        group_id=sg.id, ip_protocol="tcp",
        from_port=22,
        to_port=22,
        src_security_group_group_id=self.ctx.web_security_group.id)
    self.assertTrue(status)
def test_100_launch_nat_instance(self):
    """Launch instances for NAT server"""
    # The NAT box gets one interface in each subnet: eth0 (public/web)
    # and eth1 (private/db).
    interface_web = networkinterface.NetworkInterfaceSpecification(
        subnet_id=self.ctx.web_subnet.id,
        groups=[self.ctx.nat_security_group.id])
    interface_db = networkinterface.NetworkInterfaceSpecification(
        subnet_id=self.ctx.db_subnet.id,
        groups=[self.ctx.nat_security_group.id])
    reservation = self.vpc_client.run_instances(
        self.image_id,
        key_name=self.keypair.name,
        # security_group_ids=[self.ctx.nat_security_group.id],
        instance_type=self.instance_type,
        placement=self.zone,
        # subnet_id=self.ctx.web_subnet.id
        network_interfaces=(
            networkinterface.NetworkInterfaceCollection(
                interface_web, interface_db))
    )
    self.assertIsNotNone(reservation)
    self.addResourceCleanUp(self.destroy_reservation, reservation)
    self.assertEqual(1, len(reservation.instances))
    instance = reservation.instances[0]
    if instance.state != "running":
        self.assertInstanceStateWait(instance, "running")
    # Public IP is attached to the first (web subnet) interface.
    instance.ip_address = self._prepare_public_ip(
        instance,
        instance.interfaces[0].id)
    internal_interface_id = instance.interfaces[1].id
    # A NAT box forwards other hosts' traffic, so the source/dest check
    # must be disabled on its internal interface.
    status = self.vpc_client.modify_network_interface_attribute(
        internal_interface_id,
        attr='sourceDestCheck',
        value=False)
    self.assertTrue(status)
    # Route the private subnet's default traffic via the NAT interface.
    rtable = self.ctx.main_route_table
    status = self.vpc_client.create_route(
        rtable.id, "0.0.0.0/0",
        interface_id=internal_interface_id)
    self.assertTrue(status)
    self.ctx.nat_instance = instance
def test_101_launch_instances(self):
    """Launch instances for web server and db server"""
    # Web server in the public subnet, with a public IP.
    reservation = self.vpc_client.run_instances(
        self.image_id,
        key_name=self.keypair.name,
        security_group_ids=[self.ctx.web_security_group.id],
        instance_type=self.instance_type,
        placement=self.zone,
        subnet_id=self.ctx.web_subnet.id)
    self.assertIsNotNone(reservation)
    self.addResourceCleanUp(self.destroy_reservation, reservation)
    self.assertEqual(1, len(reservation.instances))
    instance = reservation.instances[0]
    if instance.state != "running":
        self.assertInstanceStateWait(instance, "running")
    instance.ip_address = self._prepare_public_ip(instance)
    self.ctx.web_instance = instance
    # DB server in the private subnet; no public IP is assigned.
    reservation = self.vpc_client.run_instances(
        self.image_id,
        key_name=self.keypair.name,
        security_group_ids=[self.ctx.db_security_group.id],
        instance_type=self.instance_type,
        placement=self.zone,
        subnet_id=self.ctx.db_subnet.id)
    self.assertIsNotNone(reservation)
    self.addResourceCleanUp(self.destroy_reservation, reservation)
    self.assertEqual(1, len(reservation.instances))
    instance = reservation.instances[0]
    if instance.state != "running":
        self.assertInstanceStateWait(instance, "running")
    self.ctx.db_instance = instance
def test_102_tune_nat_instance(self):
    """Tune NAT in NAT instance"""
    instance = self.ctx.nat_instance
    address = instance.ip_address
    ssh = remote_client.RemoteClient(address,
                                     self.ssh_user,
                                     pkey=self.keypair.material)
    # Masquerade all VPC traffic leaving via eth0 and enable kernel IP
    # forwarding so the box actually routes.
    ssh.exec_command("sudo iptables -t nat -A POSTROUTING -s %s "
                     "-o eth0 -j MASQUERADE" % str(self.vpc_cidr))
    ssh.exec_command("sudo sysctl -w net.ipv4.ip_forward=1")
    # Bring up the second (private subnet) interface via DHCP.
    ssh.exec_command("echo $'auto eth1\niface eth1 inet dhcp\n' "
                     "| sudo tee -a /etc/network/interfaces.d/eth1.cfg")
    ssh.exec_command("sudo ifup eth1")
def test_200_check_connectivity(self):
    """Check inside and outside connectivities"""
    web_ip = self.ctx.web_instance.ip_address
    db_ip = self.ctx.db_instance.private_ip_address
    ssh = remote_client.RemoteClient(web_ip,
                                     self.ssh_user,
                                     pkey=self.keypair.material)
    # Copy the private key onto the web server so we can hop from it to
    # the db server in the private subnet.
    ssh_conn = ssh.ssh_client._get_ssh_connection()
    sftp = ssh_conn.open_sftp()
    fr = sftp.file("key.pem", 'wb')
    fr.set_pipelined(True)
    fr.write(self.keypair.material)
    fr.close()
    ssh_conn.close()
    ssh.exec_command('chmod 400 key.pem')
    # From the db server, outside connectivity must flow via the NAT
    # instance; exec_command raises if the remote command fails.
    ssh.exec_command(
        "ssh -i key.pem -o UserKnownHostsFile=/dev/null "
        "-o StrictHostKeyChecking=no %(user)s@%(ip)s "
        "curl -s http://google.com" %
        {"user": self.ssh_user, "ip": db_ip})

View File

@ -1,379 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netaddr
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils.linux import remote_client
from tempest import test
import logging
# boto is extremely chatty at default levels; keep only critical messages.
logging.getLogger('boto').setLevel(logging.CRITICAL)
class VPC_Scenario(aws_base.BaseAWSTest):
    """
    Reproduce 'VPC with Public and Private Subnets' scenario
    (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Scenario2.html)

    The test_* methods are ordered (000, 001, ...) and share state via
    ``ctx``; each step builds on resources created by earlier steps.
    """

    class Context(object):
        # Shared mutable state handed from one ordered test step to the
        # next and consumed by class teardown.
        vpc = None
        internet_gateway = None
        web_subnet = None
        db_subnet = None
        main_route_table = None
        custom_route_table = None
        web_security_group = None
        nat_security_group = None
        db_security_group = None
        web_instance = None
        db_instance = None
        nat_instance = None

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        """Prepare shared fixtures: zone, CIDRs, image id and keypair."""
        super(VPC_Scenario, cls).setUpClass()
        cls.ctx = cls.Context()
        cls.zone = cls.config.boto.aws_zone
        cfg = cls.config.cloudscaling
        cls.ssh_user = cfg.general_ssh_user_name
        cls.vpc_cidr = netaddr.IPNetwork(cfg.vpc_cidr)
        # Carve two subnets of the configured prefix out of the VPC CIDR.
        cls.web_subnet, cls.db_subnet = cls.vpc_cidr.subnet(
            cfg.vpc_subnet_prefix, 2)
        cls.test_client_cidr = netaddr.IPNetwork(cfg.test_client_cidr)
        cls.image_id = cls._prepare_image_id(cfg.general_image_name)
        cls.keypair = cls._prepare_key_pair()

    @classmethod
    def tearDownClass(cls):
        # Group-to-group rules must be revoked before the groups can be
        # deleted by the generic resource cleanup.
        if cls.ctx is not None:
            for group in [cls.ctx.web_security_group,
                          cls.ctx.nat_security_group,
                          cls.ctx.db_security_group]:
                if not group:
                    continue
                try:
                    cls._revoke_security_group_linked_rules(group)
                except Exception:
                    # Best effort; never mask the superclass teardown.
                    pass
        super(VPC_Scenario, cls).tearDownClass()

    @classmethod
    def _revoke_security_group_linked_rules(cls, group):
        """Revoke all non-CIDR (group-to-group) rules of ``group``."""
        groups = cls.vpc_client.get_all_security_groups(group_ids=[group.id])
        if len(groups) == 0:
            return
        sg = groups[0]
        for rule in sg.rules:
            for grant in rule.grants:
                # A grant without a CIDR references another security group.
                if not grant.cidr_ip:
                    cls.vpc_client.revoke_security_group(
                        group_id=sg.id,
                        ip_protocol=rule.ip_protocol,
                        from_port=rule.from_port,
                        to_port=rule.to_port,
                        src_security_group_group_id=grant.groupId)
        for rule in sg.rules_egress:
            for grant in rule.grants:
                if not grant.cidr_ip:
                    cls.vpc_client.revoke_security_group_egress(
                        sg.id,
                        rule.ip_protocol,
                        from_port=rule.from_port,
                        to_port=rule.to_port,
                        src_group_id=grant.groupId)

    def test_000_create_vpc(self):
        """Create VPC"""
        vpc = self.vpc_client.create_vpc(str(self.vpc_cidr))
        self.assertIsNotNone(vpc)
        self.assertTrue(vpc.id)
        self.addResourceCleanUp(self.vpc_client.delete_vpc, vpc.id)
        self.ctx.vpc = vpc

    def test_001_create_internet_gateway(self):
        """Create internet gateway"""
        ig = self.vpc_client.create_internet_gateway()
        self.assertIsNotNone(ig)
        self.assertTrue(ig.id)
        self.addResourceCleanUp(self._destroy_internet_gateway, ig)
        status = self.vpc_client.attach_internet_gateway(ig.id,
                                                         self.ctx.vpc.id)
        self.assertTrue(status)
        self.ctx.internet_gateway = ig

    def test_010_create_subnets(self):
        """Create subnets"""
        # Public (web) subnet.
        sn = self.vpc_client.create_subnet(self.ctx.vpc.id,
                                           str(self.web_subnet),
                                           self.zone)
        self.assertIsNotNone(sn)
        self.assertTrue(sn.id)
        self.addResourceCleanUp(self.vpc_client.delete_subnet, sn.id)
        self.ctx.web_subnet = sn
        # Private (db) subnet.
        sn = self.vpc_client.create_subnet(self.ctx.vpc.id,
                                           str(self.db_subnet),
                                           self.zone)
        self.assertIsNotNone(sn)
        self.assertTrue(sn.id)
        self.addResourceCleanUp(self.vpc_client.delete_subnet, sn.id)
        self.ctx.db_subnet = sn

    def test_020_get_main_route_table(self):
        """Describe auto created route table"""
        rtables = self.vpc_client.get_all_route_tables(
            filters=[("vpc-id", self.ctx.vpc.id)])
        self.assertIsNotNone(rtables)
        # Exactly one (the main) route table comes with a new VPC.
        self.assertEqual(1, len(rtables))
        self.ctx.main_route_table = rtables[0]

    def test_025_create_custom_route_table(self):
        """Create route table for web servers"""
        rtable = self.vpc_client.create_route_table(self.ctx.vpc.id)
        self.assertIsNotNone(rtable)
        self.assertTrue(rtable.id)
        self.addResourceCleanUp(self.vpc_client.delete_route_table, rtable.id)
        # Default-route the web subnet through the internet gateway.
        ig = self.ctx.internet_gateway
        status = self.vpc_client.create_route(rtable.id, "0.0.0.0/0",
                                              gateway_id=ig.id)
        self.assertTrue(status)
        association_id = self.vpc_client.associate_route_table(
            rtable.id, self.ctx.web_subnet.id)
        self.assertTrue(association_id)
        self.addResourceCleanUp(self.vpc_client.disassociate_route_table,
                                association_id)
        self.ctx.custom_route_table = rtable

    def test_050_create_security_groups(self):
        """Create and tune security groups"""
        # Create all three groups first so the cross-referencing rules
        # below can mention each other's ids.
        sg = self.vpc_client.create_security_group(
            data_utils.rand_name("WebServerSG-"),
            data_utils.rand_name("description "),
            self.ctx.vpc.id)
        self.assertIsNotNone(sg)
        self.assertTrue(sg.id)
        self.addResourceCleanUp(self._destroy_security_group_wait, sg)
        self.ctx.web_security_group = sg
        sg = self.vpc_client.create_security_group(
            data_utils.rand_name("NATSG-"),
            data_utils.rand_name("description "),
            self.ctx.vpc.id)
        self.assertIsNotNone(sg)
        self.assertTrue(sg.id)
        self.addResourceCleanUp(self._destroy_security_group_wait, sg)
        self.ctx.nat_security_group = sg
        sg = self.vpc_client.create_security_group(
            data_utils.rand_name("DBServerSG-"),
            data_utils.rand_name("description "),
            self.ctx.vpc.id)
        self.assertIsNotNone(sg)
        self.assertTrue(sg.id)
        self.addResourceCleanUp(self._destroy_security_group_wait, sg)
        self.ctx.db_security_group = sg
        # Web servers: drop default allow-all egress, then open only
        # SQL ports towards the db group plus what the tests need.
        sg = self.ctx.web_security_group
        status = self.vpc_client.revoke_security_group_egress(
            sg.id, "-1", cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 1433, 1433,
            src_group_id=self.ctx.db_security_group.id)
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 3306, 3306,
            src_group_id=self.ctx.db_security_group.id)
        self.assertTrue(status)
        # NOTE(ft): especially for connectivity test
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        # NOTE(ft): especially for connectivity test
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 22, 22,
            src_group_id=self.ctx.db_security_group.id)
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=80, to_port=80,
            cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=443, to_port=443,
            cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=22, to_port=22,
            cidr_ip=str(self.test_client_cidr))
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=3389,
            to_port=3389, cidr_ip=str(self.test_client_cidr))
        self.assertTrue(status)
        # NAT group: web traffic out, relayed web traffic from the
        # private subnet in, SSH from the test client.
        sg = self.ctx.nat_security_group
        status = self.vpc_client.revoke_security_group_egress(
            sg.id, "-1", cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 443, 443, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=80, to_port=80,
            cidr_ip=str(self.db_subnet))
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=443, to_port=443,
            cidr_ip=str(self.db_subnet))
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=22, to_port=22,
            cidr_ip=str(self.test_client_cidr))
        self.assertTrue(status)
        # DB group: reachable on SQL ports from web servers only.
        sg = self.ctx.db_security_group
        status = self.vpc_client.revoke_security_group_egress(
            sg.id, "-1", cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 443, 443, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp",
            from_port=1433,
            to_port=1433,
            src_security_group_group_id=self.ctx.web_security_group.id)
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp",
            from_port=3306,
            to_port=3306,
            src_security_group_group_id=self.ctx.web_security_group.id)
        self.assertTrue(status)
        # NOTE(ft): especially for connectivity test
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp",
            from_port=22,
            to_port=22,
            src_security_group_group_id=self.ctx.web_security_group.id)
        self.assertTrue(status)

    def test_100_launch_nat_instance(self):
        """Launch instances for NAT server"""
        reservation = self.vpc_client.run_instances(
            self.image_id,
            key_name=self.keypair.name,
            security_group_ids=[self.ctx.nat_security_group.id],
            instance_type=self.instance_type,
            placement=self.zone,
            subnet_id=self.ctx.web_subnet.id)
        self.assertIsNotNone(reservation)
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        self.assertEqual(1, len(reservation.instances))
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        self._prepare_public_ip(instance)
        # A NAT box forwards other hosts' traffic, so the source/dest
        # check must be disabled on the instance.
        status = self.vpc_client.modify_instance_attribute(
            instance.id, 'sourceDestCheck', False)
        self.assertTrue(status)
        # Route the private subnet's default traffic via the NAT instance.
        rtable = self.ctx.main_route_table
        status = self.vpc_client.create_route(rtable.id, "0.0.0.0/0",
                                              instance_id=instance.id)
        self.assertTrue(status)
        self.ctx.nat_instance = instance

    def test_101_launch_instances(self):
        """Launch instances for web server and db server"""
        # Web server in the public subnet, with a public IP.
        reservation = self.vpc_client.run_instances(
            self.image_id,
            key_name=self.keypair.name,
            security_group_ids=[self.ctx.web_security_group.id],
            instance_type=self.instance_type,
            placement=self.zone,
            subnet_id=self.ctx.web_subnet.id)
        self.assertIsNotNone(reservation)
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        self.assertEqual(1, len(reservation.instances))
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        self._prepare_public_ip(instance)
        self.ctx.web_instance = instance
        # DB server in the private subnet; no public IP is assigned.
        reservation = self.vpc_client.run_instances(
            self.image_id,
            key_name=self.keypair.name,
            security_group_ids=[self.ctx.db_security_group.id],
            instance_type=self.instance_type,
            placement=self.zone,
            subnet_id=self.ctx.db_subnet.id)
        self.assertIsNotNone(reservation)
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        self.assertEqual(1, len(reservation.instances))
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        self.ctx.db_instance = instance

    def test_102_tune_nat_instance(self):
        """Tune NAT in NAT instance"""
        instance = self.ctx.nat_instance
        address = instance.ip_address
        ssh = remote_client.RemoteClient(address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        # NOTE(ft): We must use tty mode, because some images (like Amazon
        # Linux) has restrictions (requiretty flag in /etc/sudoers)
        ssh_conn = ssh.ssh_client._get_ssh_connection()
        chan = ssh_conn.get_transport().open_session()
        chan.get_pty()
        chan.exec_command("sudo iptables -t nat -A POSTROUTING -s %s "
                          "-o eth0 -j MASQUERADE" % str(self.vpc_cidr))
        chan.close()
        chan = ssh_conn.get_transport().open_session()
        chan.get_pty()
        chan.exec_command("sudo sysctl -w net.ipv4.ip_forward=1")
        chan.close()
        ssh_conn.close()

    def test_200_check_connectivity(self):
        """Check inside and outside connectivities"""
        web_ip = self.ctx.web_instance.ip_address
        db_ip = self.ctx.db_instance.private_ip_address
        ssh = remote_client.RemoteClient(web_ip,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        # Outside connectivity from the public (web) subnet.
        ssh.exec_command("curl -s http://google.com")
        # Copy the private key onto the web server so we can hop from it
        # to the db server in the private subnet.
        ssh_conn = ssh.ssh_client._get_ssh_connection()
        sftp = ssh_conn.open_sftp()
        fr = sftp.file("key.pem", 'wb')
        fr.set_pipelined(True)
        fr.write(self.keypair.material)
        fr.close()
        ssh_conn.close()
        ssh.exec_command('chmod 400 key.pem')
        # From the db server, outside connectivity must flow via the NAT.
        ssh.exec_command("ssh -i key.pem -o UserKnownHostsFile=/dev/null "
                         "-o StrictHostKeyChecking=no %(user)s@%(ip)s "
                         "curl -s http://google.com" %
                         {"user": self.ssh_user, "ip": db_ip})

View File

@ -1,35 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
# Silence noisy third-party loggers used by these tests.
logging.getLogger('boto').setLevel(logging.INFO)
logging.getLogger('paramiko').setLevel(logging.WARNING)
# Module-level logger for this utility module.
LOG = logging.getLogger(__name__)
def detect_new_volume(proc_partitions, proc_partitions_new):
    """Return the device name present in the new listing but not the old.

    Both arguments are iterables of /proc/partitions-style lines. Raises
    ``KeyError`` if no new device appeared; picks an arbitrary one if
    several did (matching the original set-pop behavior).
    """
    added = get_devices(proc_partitions_new) - get_devices(proc_partitions)
    return added.pop()
def get_devices(proc_partitions):
    """Extract the set of device names from /proc/partitions-style lines.

    :param proc_partitions: iterable of lines; the device name is the
        4th whitespace-separated column (the header line contributes the
        literal token ``name``, which cancels out when two listings are
        diffed by the caller).
    :returns: set of device-name strings.
    """
    devices = set()
    for line in proc_partitions:
        # split() handles any whitespace run (spaces or tabs), unlike the
        # previous split(' ') + filter which broke on tab-separated input.
        fields = line.split()
        # Skip blank or malformed (fewer than 4 columns) lines instead of
        # raising IndexError as the old code did.
        if len(fields) >= 4:
            devices.add(fields[3])
    return devices

View File

@ -54,10 +54,7 @@ class EC2_EBSInstanceTuneBDM(base.EC2TestCase):
@decorators.idempotent_id('2f51dd78-ff1e-494a-bcbc-f47580df17cb')
def test_launch_ebs_instance_with_persistent_root_device(self):
"""
Launch EBS-backed instance with left root device after termination
"""
"""Launch EBS-backed instance with persistent root device"""
instance_id = self.run_instance(ImageId=self.image_id,
BlockDeviceMappings=[{'DeviceName': self.root_device_name,
'Ebs': {'DeleteOnTermination': False}}])
@ -132,7 +129,7 @@ class EC2_EBSInstanceTuneBDM(base.EC2TestCase):
class EC2_EBSInstanceAttaching(base.EC2TestCase):
"""
"""Test attaching cases
Launch instance with two attached volumes. One at first free slot (xxdb,
other at some free slot (xxdh). Use full device name for the first and
@ -288,7 +285,7 @@ class EC2_EBSInstanceAttaching(base.EC2TestCase):
class EC2_EBSInstanceSnapshot(base.EC2TestCase):
"""
"""Test instance's snapshotting
Launch EBS-backed image, snapshot root device, register image,
and launch another instance from the image
@ -363,7 +360,7 @@ class EC2_EBSInstanceSnapshot(base.EC2TestCase):
class EC2_EBSInstanceResizeRootDevice(base.EC2TestCase):
"""
"""Test resizing root device on instance
Launch EBS-backed instance, stop instance, detach root volume, snapshot it,
create volume from snapshot with increased size, attach new root volume,

View File

@ -1,280 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Sphinx configuration for building the ec2api_tempest_plugin release
# notes with reno and the openstackdocs theme.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'openstackdocstheme',
    'reno.sphinxext',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'ec2api_tempest_plugin Release Notes'
copyright = u'2017, OpenStack Developers'

# openstackdocstheme options
repository_name = 'openstack/openstack'
bug_project = 'ec2api_tempest_plugin'
bug_tag = ''

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'ec2api_tempest_pluginReleaseNotesdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'ec2api_tempest_pluginReleaseNotes.tex',
     u'ec2api_tempest_plugin Release Notes Documentation',
     u'OpenStack Foundation', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ec2api_tempest_pluginrereleasenotes',
     u'ec2api_tempest_plugin Release Notes Documentation',
     [u'OpenStack Foundation'], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'ec2api_tempest_plugin ReleaseNotes',
     u'ec2api_tempest_plugin Release Notes Documentation',
     u'OpenStack Foundation', 'ec2api_tempest_pluginReleaseNotes',
     'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']

View File

@ -1,8 +0,0 @@
============================================
ec2api_tempest_plugin Release Notes
============================================
.. toctree::
:maxdepth: 1
unreleased

View File

@ -1,5 +0,0 @@
==============================
Current Series Release Notes
==============================
.. release-notes::

View File

@ -2,4 +2,11 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=2.0 # Apache-2.0
pbr!=2.1.0,>=2.0.0 # Apache-2.0
botocore>=1.5.1 # Apache-2.0
oslo.config>=5.1.0 # Apache-2.0
oslo.log>=3.30.0 # Apache-2.0
six>=1.10.0 # MIT
testtools>=2.2.0 # MIT
paramiko>=2.0.0 # LGPLv2.1+
netaddr>=0.7.18 # BSD

View File

@ -23,15 +23,6 @@ classifier =
packages =
ec2api_tempest_plugin
[build_sphinx]
all-files = 1
warning-is-error = 1
source-dir = doc/source
build-dir = doc/build
[upload_sphinx]
upload-dir = doc/build/html
[compile_catalog]
directory = ec2api_tempest_plugin/locale
domain = ec2api_tempest_plugin

View File

@ -3,13 +3,3 @@
# process, which may cause wedges in the gate later.
hacking>=0.12.0,<0.13 # Apache-2.0
coverage>=4.0,!=4.4 # Apache-2.0
python-subunit>=0.0.18 # Apache-2.0/BSD
sphinx>=1.6.2 # BSD
oslotest>=1.10.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
testtools>=1.4.0 # MIT
openstackdocstheme>=1.11.0 # Apache-2.0
# releasenotes
reno>=1.8.0 # Apache-2.0

38
tox.ini
View File

@ -1,19 +1,29 @@
[tox]
minversion = 2.0
envlist = py34,py27,pypy,pep8
envlist = py27,pep8
skipsdist = True
[testenv]
usedevelop = True
# tox is silly... these need to be separated by a newline....
whitelist_externals = bash
find
rm
env
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
PYTHONWARNINGS=default::DeprecationWarning
OS_STDOUT_CAPTURE=1
OS_STDERR_CAPTURE=1
OS_TEST_TIMEOUT=60
PYTHONWARNINGS=default::DeprecationWarning
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
deps = -r{toxinidir}/test-requirements.txt
commands = stestr run {posargs}
commands =
find . -type f -name "*.pyc" -delete
rm -f .testrepository/times.dbm
[testenv:pep8]
commands = flake8 {posargs}
@ -21,23 +31,6 @@ commands = flake8 {posargs}
[testenv:venv]
commands = {posargs}
[testenv:cover]
setenv =
VIRTUAL_ENV={envdir}
PYTHON=coverage run --source ec2api_tempest_plugin --parallel-mode
commands =
stestr run {posargs}
coverage combine
coverage html -d cover
coverage xml -o cover/coverage.xml
[testenv:docs]
commands = python setup.py build_sphinx
[testenv:releasenotes]
commands =
sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:debug]
commands = oslo_debug_helper {posargs}
@ -45,6 +38,9 @@ commands = oslo_debug_helper {posargs}
# E123, E125 skipped as they are invalid PEP-8.
show-source = True
ignore = E123,E125
ignore = E122,E123,E125,E126,E127,E128
builtins = _
# H106: Dont put vim configuration in source files
# H203: Use assertIs(Not)None to check for None
enable-extensions=H106,H203
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build