Remove the ec2 api tests from tempest
With the introduction of tempest plugins we can now remove the third party tests for the ec2 api using boto. They've always been the ugly duckling in tempest (except that it never turned into a swan) where they go against some of the fundamental principles of tempest tests. For example, having its own client implementation, and testing non-OpenStack APIs. This patch removes all the pieces of the third party test dir and deprecates all the config options related to boto. A plugin implementation is being worked on [1] that can be used to fill the coverage hole left by removing these from the tempest tree. [1] https://github.com/mtreinish/tempest_ec2 Change-Id: Ib5e24e19bcba9808a9f49fe7f328668df77fe4f9
This commit is contained in:
parent
2d219b1e0f
commit
42d69512d9
@ -87,7 +87,7 @@ as it is simpler, and quicker to work with.
|
||||
be done with testr directly or any `testr`_ based test runner, like
|
||||
`ostestr`_. For example, from the working dir running::
|
||||
|
||||
$ ostestr --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty))'
|
||||
$ ostestr --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario))'
|
||||
|
||||
will run the same set of tests as the default gate jobs.
|
||||
|
||||
|
@ -1 +0,0 @@
|
||||
../../../tempest/thirdparty/README.rst
|
@ -26,7 +26,6 @@ where your test contributions should go.
|
||||
field_guide/api
|
||||
field_guide/scenario
|
||||
field_guide/stress
|
||||
field_guide/thirdparty
|
||||
field_guide/unit_tests
|
||||
|
||||
---------------------------
|
||||
|
@ -7,7 +7,6 @@ anyjson>=0.3.3
|
||||
httplib2>=0.7.5
|
||||
jsonschema!=2.5.0,<3.0.0,>=2.0.0
|
||||
testtools>=1.4.0
|
||||
boto>=2.32.1
|
||||
paramiko>=1.13.0
|
||||
netaddr!=0.7.16,>=0.7.12
|
||||
testrepository>=0.0.18
|
||||
|
@ -16,7 +16,6 @@ to make this clear.
|
||||
| api/ - API tests
|
||||
| scenario/ - complex scenario tests
|
||||
| stress/ - stress tests
|
||||
| thirdparty/ - 3rd party api tests
|
||||
|
||||
Each of these directories contains different types of tests. What
|
||||
belongs in each directory, the rules and examples for good tests, are
|
||||
@ -56,14 +55,6 @@ workload against it and seeing what breaks. The stress test framework runs
|
||||
several test jobs in parallel and can run any existing test in Tempest as a
|
||||
stress job.
|
||||
|
||||
:ref:`third_party_field_guide`
|
||||
------------------------------
|
||||
|
||||
Many openstack components include 3rdparty API support. It is
|
||||
completely legitimate for Tempest to include tests of 3rdparty APIs,
|
||||
but those should be kept separate from the normal OpenStack
|
||||
validation.
|
||||
|
||||
:ref:`unit_tests_field_guide`
|
||||
-----------------------------
|
||||
|
||||
|
@ -70,7 +70,6 @@ from tempest import exceptions
|
||||
from tempest import manager
|
||||
from tempest.services.baremetal.v1.json.baremetal_client import \
|
||||
BaremetalClient
|
||||
from tempest.services import botoclients
|
||||
from tempest.services.compute.json.floating_ips_client import \
|
||||
FloatingIPsClient as ComputeFloatingIPsClient
|
||||
from tempest.services.compute.json.keypairs_client import KeyPairsClient
|
||||
@ -320,15 +319,6 @@ class Manager(manager.Manager):
|
||||
self.negative_client = negative_rest_client.NegativeRestClient(
|
||||
self.auth_provider, service, **self.default_params)
|
||||
|
||||
# Generating EC2 credentials in tempest is only supported
|
||||
# with identity v2
|
||||
if CONF.identity_feature_enabled.api_v2 and \
|
||||
CONF.identity.auth_version == 'v2':
|
||||
# EC2 and S3 clients, if used, will check configured AWS
|
||||
# credentials and generate new ones if needed
|
||||
self.ec2api_client = botoclients.APIClientEC2(self.identity_client)
|
||||
self.s3_client = botoclients.ObjectClientS3(self.identity_client)
|
||||
|
||||
def _set_compute_clients(self):
|
||||
params = {
|
||||
'service': CONF.compute.catalog_type,
|
||||
|
@ -387,9 +387,6 @@ ComputeFeaturesGroup = [
|
||||
default=True,
|
||||
help='Does the test environment support creating snapshot '
|
||||
'images of running instances?'),
|
||||
cfg.BoolOpt('ec2_api',
|
||||
default=True,
|
||||
help='Does the test environment have the ec2 api running?'),
|
||||
cfg.BoolOpt('nova_cert',
|
||||
default=True,
|
||||
help='Does the test environment have the nova cert running?'),
|
||||
@ -1003,54 +1000,6 @@ DataProcessingFeaturesGroup = [
|
||||
help="List of enabled data processing plugins")
|
||||
]
|
||||
|
||||
|
||||
boto_group = cfg.OptGroup(name='boto',
|
||||
title='EC2/S3 options')
|
||||
BotoGroup = [
|
||||
cfg.StrOpt('ec2_url',
|
||||
default="http://localhost:8773/services/Cloud",
|
||||
help="EC2 URL"),
|
||||
cfg.StrOpt('s3_url',
|
||||
default="http://localhost:8080",
|
||||
help="S3 URL"),
|
||||
cfg.StrOpt('aws_secret',
|
||||
help="AWS Secret Key",
|
||||
secret=True),
|
||||
cfg.StrOpt('aws_access',
|
||||
help="AWS Access Key"),
|
||||
cfg.StrOpt('aws_zone',
|
||||
default="nova",
|
||||
help="AWS Zone for EC2 tests"),
|
||||
cfg.StrOpt('s3_materials_path',
|
||||
default="/opt/stack/devstack/files/images/"
|
||||
"s3-materials/cirros-0.3.0",
|
||||
help="S3 Materials Path"),
|
||||
cfg.StrOpt('ari_manifest',
|
||||
default="cirros-0.3.0-x86_64-initrd.manifest.xml",
|
||||
help="ARI Ramdisk Image manifest"),
|
||||
cfg.StrOpt('ami_manifest',
|
||||
default="cirros-0.3.0-x86_64-blank.img.manifest.xml",
|
||||
help="AMI Machine Image manifest"),
|
||||
cfg.StrOpt('aki_manifest',
|
||||
default="cirros-0.3.0-x86_64-vmlinuz.manifest.xml",
|
||||
help="AKI Kernel Image manifest"),
|
||||
cfg.StrOpt('instance_type',
|
||||
default="m1.tiny",
|
||||
help="Instance type"),
|
||||
cfg.IntOpt('http_socket_timeout',
|
||||
default=3,
|
||||
help="boto Http socket timeout"),
|
||||
cfg.IntOpt('num_retries',
|
||||
default=1,
|
||||
help="boto num_retries on error"),
|
||||
cfg.IntOpt('build_timeout',
|
||||
default=60,
|
||||
help="Status Change Timeout"),
|
||||
cfg.IntOpt('build_interval',
|
||||
default=1,
|
||||
help="Status Change Test Interval"),
|
||||
]
|
||||
|
||||
stress_group = cfg.OptGroup(name='stress', title='Stress Test Options')
|
||||
|
||||
StressGroup = [
|
||||
@ -1309,7 +1258,6 @@ _opts = [
|
||||
(dashboard_group, DashboardGroup),
|
||||
(data_processing_group, DataProcessingGroup),
|
||||
(data_processing_feature_group, DataProcessingFeaturesGroup),
|
||||
(boto_group, BotoGroup),
|
||||
(stress_group, StressGroup),
|
||||
(scenario_group, ScenarioGroup),
|
||||
(service_available_group, ServiceAvailableGroup),
|
||||
@ -1379,7 +1327,6 @@ class TempestConfigPrivate(object):
|
||||
self.data_processing = _CONF['data-processing']
|
||||
self.data_processing_feature_enabled = _CONF[
|
||||
'data-processing-feature-enabled']
|
||||
self.boto = _CONF.boto
|
||||
self.stress = _CONF.stress
|
||||
self.scenario = _CONF.scenario
|
||||
self.service_available = _CONF.service_available
|
||||
|
@ -1,217 +0,0 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import types
|
||||
|
||||
import boto
|
||||
import boto.ec2
|
||||
import boto.s3.connection
|
||||
from six.moves import configparser as ConfigParser
|
||||
from six.moves.urllib import parse as urlparse
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from tempest import config
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class BotoClientBase(object):
|
||||
|
||||
ALLOWED_METHODS = set()
|
||||
|
||||
def __init__(self, identity_client):
|
||||
self.identity_client = identity_client
|
||||
|
||||
self.ca_cert = CONF.identity.ca_certificates_file
|
||||
self.connection_timeout = str(CONF.boto.http_socket_timeout)
|
||||
self.num_retries = str(CONF.boto.num_retries)
|
||||
self.build_timeout = CONF.boto.build_timeout
|
||||
|
||||
self.connection_data = {}
|
||||
|
||||
def _config_boto_timeout(self, timeout, retries):
|
||||
try:
|
||||
boto.config.add_section("Boto")
|
||||
except ConfigParser.DuplicateSectionError:
|
||||
pass
|
||||
boto.config.set("Boto", "http_socket_timeout", timeout)
|
||||
boto.config.set("Boto", "num_retries", retries)
|
||||
|
||||
def _config_boto_ca_certificates_file(self, ca_cert):
|
||||
if ca_cert is None:
|
||||
return
|
||||
|
||||
try:
|
||||
boto.config.add_section("Boto")
|
||||
except ConfigParser.DuplicateSectionError:
|
||||
pass
|
||||
boto.config.set("Boto", "ca_certificates_file", ca_cert)
|
||||
|
||||
def __getattr__(self, name):
|
||||
"""Automatically creates methods for the allowed methods set."""
|
||||
if name in self.ALLOWED_METHODS:
|
||||
def func(self, *args, **kwargs):
|
||||
with contextlib.closing(self.get_connection()) as conn:
|
||||
return getattr(conn, name)(*args, **kwargs)
|
||||
|
||||
func.__name__ = name
|
||||
setattr(self, name, types.MethodType(func, self, self.__class__))
|
||||
setattr(self.__class__, name,
|
||||
types.MethodType(func, None, self.__class__))
|
||||
return getattr(self, name)
|
||||
else:
|
||||
raise AttributeError(name)
|
||||
|
||||
def get_connection(self):
|
||||
self._config_boto_timeout(self.connection_timeout, self.num_retries)
|
||||
self._config_boto_ca_certificates_file(self.ca_cert)
|
||||
|
||||
ec2_client_args = {'aws_access_key_id': CONF.boto.aws_access,
|
||||
'aws_secret_access_key': CONF.boto.aws_secret}
|
||||
if not all(ec2_client_args.values()):
|
||||
ec2_client_args = self.get_aws_credentials(self.identity_client)
|
||||
|
||||
self.connection_data.update(ec2_client_args)
|
||||
return self.connect_method(**self.connection_data)
|
||||
|
||||
def get_aws_credentials(self, identity_client):
|
||||
"""Obtain existing, or create new AWS credentials
|
||||
|
||||
:param identity_client: identity client with embedded credentials
|
||||
:return: EC2 credentials
|
||||
"""
|
||||
ec2_cred_list = identity_client.list_user_ec2_credentials(
|
||||
identity_client.user_id)['credentials']
|
||||
for cred in ec2_cred_list:
|
||||
if cred['tenant_id'] == identity_client.tenant_id:
|
||||
ec2_cred = cred
|
||||
break
|
||||
else:
|
||||
ec2_cred = (identity_client.create_user_ec2_credentials(
|
||||
identity_client.user_id,
|
||||
tenant_id=identity_client.tenant_id)['credential'])
|
||||
if not all((ec2_cred, ec2_cred['access'], ec2_cred['secret'])):
|
||||
raise lib_exc.NotFound("Unable to get access and secret keys")
|
||||
else:
|
||||
ec2_cred_aws = {}
|
||||
ec2_cred_aws['aws_access_key_id'] = ec2_cred['access']
|
||||
ec2_cred_aws['aws_secret_access_key'] = ec2_cred['secret']
|
||||
return ec2_cred_aws
|
||||
|
||||
|
||||
class APIClientEC2(BotoClientBase):
|
||||
|
||||
def connect_method(self, *args, **kwargs):
|
||||
return boto.connect_ec2(*args, **kwargs)
|
||||
|
||||
def __init__(self, identity_client):
|
||||
super(APIClientEC2, self).__init__(identity_client)
|
||||
insecure_ssl = CONF.identity.disable_ssl_certificate_validation
|
||||
purl = urlparse.urlparse(CONF.boto.ec2_url)
|
||||
|
||||
region_name = CONF.compute.region
|
||||
if not region_name:
|
||||
region_name = CONF.identity.region
|
||||
region = boto.ec2.regioninfo.RegionInfo(name=region_name,
|
||||
endpoint=purl.hostname)
|
||||
port = purl.port
|
||||
if port is None:
|
||||
if purl.scheme is not "https":
|
||||
port = 80
|
||||
else:
|
||||
port = 443
|
||||
else:
|
||||
port = int(port)
|
||||
self.connection_data.update({"is_secure": purl.scheme == "https",
|
||||
"validate_certs": not insecure_ssl,
|
||||
"region": region,
|
||||
"host": purl.hostname,
|
||||
"port": port,
|
||||
"path": purl.path})
|
||||
|
||||
ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
|
||||
'delete_key_pair', 'import_key_pair',
|
||||
'get_all_key_pairs',
|
||||
'get_all_tags',
|
||||
'create_image', 'get_image',
|
||||
'register_image', 'deregister_image',
|
||||
'get_all_images', 'get_image_attribute',
|
||||
'modify_image_attribute', 'reset_image_attribute',
|
||||
'get_all_kernels',
|
||||
'create_volume', 'delete_volume',
|
||||
'get_all_volume_status', 'get_all_volumes',
|
||||
'get_volume_attribute', 'modify_volume_attribute'
|
||||
'bundle_instance', 'cancel_spot_instance_requests',
|
||||
'confirm_product_instanc',
|
||||
'get_all_instance_status', 'get_all_instances',
|
||||
'get_all_reserved_instances',
|
||||
'get_all_spot_instance_requests',
|
||||
'get_instance_attribute', 'monitor_instance',
|
||||
'monitor_instances', 'unmonitor_instance',
|
||||
'unmonitor_instances',
|
||||
'purchase_reserved_instance_offering',
|
||||
'reboot_instances', 'request_spot_instances',
|
||||
'reset_instance_attribute', 'run_instances',
|
||||
'start_instances', 'stop_instances',
|
||||
'terminate_instances',
|
||||
'attach_network_interface', 'attach_volume',
|
||||
'detach_network_interface', 'detach_volume',
|
||||
'get_console_output',
|
||||
'delete_network_interface', 'create_subnet',
|
||||
'create_network_interface', 'delete_subnet',
|
||||
'get_all_network_interfaces',
|
||||
'allocate_address', 'associate_address',
|
||||
'disassociate_address', 'get_all_addresses',
|
||||
'release_address',
|
||||
'create_snapshot', 'delete_snapshot',
|
||||
'get_all_snapshots', 'get_snapshot_attribute',
|
||||
'modify_snapshot_attribute',
|
||||
'reset_snapshot_attribute', 'trim_snapshots',
|
||||
'get_all_regions', 'get_all_zones',
|
||||
'get_all_security_groups', 'create_security_group',
|
||||
'delete_security_group', 'authorize_security_group',
|
||||
'authorize_security_group_egress',
|
||||
'revoke_security_group',
|
||||
'revoke_security_group_egress'))
|
||||
|
||||
|
||||
class ObjectClientS3(BotoClientBase):
|
||||
|
||||
def connect_method(self, *args, **kwargs):
|
||||
return boto.connect_s3(*args, **kwargs)
|
||||
|
||||
def __init__(self, identity_client):
|
||||
super(ObjectClientS3, self).__init__(identity_client)
|
||||
insecure_ssl = CONF.identity.disable_ssl_certificate_validation
|
||||
purl = urlparse.urlparse(CONF.boto.s3_url)
|
||||
port = purl.port
|
||||
if port is None:
|
||||
if purl.scheme is not "https":
|
||||
port = 80
|
||||
else:
|
||||
port = 443
|
||||
else:
|
||||
port = int(port)
|
||||
self.connection_data.update({"is_secure": purl.scheme == "https",
|
||||
"validate_certs": not insecure_ssl,
|
||||
"host": purl.hostname,
|
||||
"port": port,
|
||||
"calling_format": boto.s3.connection.
|
||||
OrdinaryCallingFormat()})
|
||||
|
||||
ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
|
||||
'get_all_buckets', 'get_bucket', 'delete_key',
|
||||
'lookup'))
|
@ -30,8 +30,7 @@ def load_tests(loader, tests, pattern):
|
||||
base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
|
||||
base_path = os.path.split(base_path)[0]
|
||||
# Load local tempest tests
|
||||
for test_dir in ['tempest/api', 'tempest/scenario',
|
||||
'tempest/thirdparty']:
|
||||
for test_dir in ['tempest/api', 'tempest/scenario']:
|
||||
full_test_dir = os.path.join(base_path, test_dir)
|
||||
if not pattern:
|
||||
suite.addTests(loader.discover(full_test_dir,
|
||||
|
35
tempest/thirdparty/README.rst
vendored
35
tempest/thirdparty/README.rst
vendored
@ -1,35 +0,0 @@
|
||||
.. _third_party_field_guide:
|
||||
|
||||
Tempest Field Guide to Third Party API tests
|
||||
============================================
|
||||
|
||||
|
||||
What are these tests?
|
||||
---------------------
|
||||
|
||||
Third party tests are tests for non native OpenStack APIs that are
|
||||
part of OpenStack projects. If we ship an API, we're really required
|
||||
to ensure that it's working.
|
||||
|
||||
An example is that Nova Compute currently has EC2 API support in tree,
|
||||
which should be tested as part of normal process.
|
||||
|
||||
|
||||
Why are these tests in tempest?
|
||||
-------------------------------
|
||||
|
||||
If we ship an API in an OpenStack component, there should be tests in
|
||||
tempest to exercise it in some way.
|
||||
|
||||
|
||||
Scope of these tests
|
||||
--------------------
|
||||
|
||||
Third party API testing should be limited to the functional testing of
|
||||
third party API compliance. Complex scenarios should be avoided, and
|
||||
instead exercised with the OpenStack API, unless the third party API
|
||||
can't be tested without those scenarios.
|
||||
|
||||
Whenever possible third party API testing should use a client as close
|
||||
to the third party API as possible. The point of these tests is API
|
||||
validation.
|
0
tempest/thirdparty/__init__.py
vendored
0
tempest/thirdparty/__init__.py
vendored
0
tempest/thirdparty/boto/__init__.py
vendored
0
tempest/thirdparty/boto/__init__.py
vendored
694
tempest/thirdparty/boto/test.py
vendored
694
tempest/thirdparty/boto/test.py
vendored
@ -1,694 +0,0 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import logging as orig_logging
|
||||
import os
|
||||
import re
|
||||
|
||||
import boto
|
||||
from boto import ec2
|
||||
from boto import exception
|
||||
from boto import s3
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
from six.moves.urllib import parse as urlparse
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from tempest.common import credentials_factory as credentials
|
||||
from tempest.common.utils import file_utils
|
||||
from tempest import config
|
||||
from tempest import exceptions
|
||||
import tempest.test
|
||||
from tempest.thirdparty.boto.utils import wait
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def decision_maker():
|
||||
A_I_IMAGES_READY = True # ari,ami,aki
|
||||
S3_CAN_CONNECT_ERROR = None
|
||||
EC2_CAN_CONNECT_ERROR = None
|
||||
secret_matcher = re.compile("[A-Za-z0-9+/]{32,}") # 40 in other system
|
||||
id_matcher = re.compile("[A-Za-z0-9]{20,}")
|
||||
|
||||
def all_read(*args):
|
||||
return all(map(file_utils.have_effective_read_access, args))
|
||||
|
||||
materials_path = CONF.boto.s3_materials_path
|
||||
ami_path = materials_path + os.sep + CONF.boto.ami_manifest
|
||||
aki_path = materials_path + os.sep + CONF.boto.aki_manifest
|
||||
ari_path = materials_path + os.sep + CONF.boto.ari_manifest
|
||||
|
||||
A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)
|
||||
boto_logger = logging.getLogger('boto')
|
||||
level = boto_logger.logger.level
|
||||
# suppress logging for boto
|
||||
boto_logger.logger.setLevel(orig_logging.CRITICAL)
|
||||
|
||||
def _cred_sub_check(connection_data):
|
||||
if not id_matcher.match(connection_data["aws_access_key_id"]):
|
||||
raise Exception("Invalid AWS access Key")
|
||||
if not secret_matcher.match(connection_data["aws_secret_access_key"]):
|
||||
raise Exception("Invalid AWS secret Key")
|
||||
raise Exception("Unknown (Authentication?) Error")
|
||||
# NOTE(andreaf) Setting up an extra manager here is redundant,
|
||||
# and should be removed.
|
||||
openstack = credentials.ConfiguredUserManager()
|
||||
try:
|
||||
if urlparse.urlparse(CONF.boto.ec2_url).hostname is None:
|
||||
raise Exception("Failed to get hostname from the ec2_url")
|
||||
ec2client = openstack.ec2api_client
|
||||
try:
|
||||
ec2client.get_all_regions()
|
||||
except exception.BotoServerError as exc:
|
||||
if exc.error_code is None:
|
||||
raise Exception("EC2 target does not looks EC2 service")
|
||||
_cred_sub_check(ec2client.connection_data)
|
||||
|
||||
except lib_exc.Unauthorized:
|
||||
EC2_CAN_CONNECT_ERROR = "AWS credentials not set," +\
|
||||
" also failed to get it from keystone"
|
||||
except Exception as exc:
|
||||
EC2_CAN_CONNECT_ERROR = str(exc)
|
||||
|
||||
try:
|
||||
if urlparse.urlparse(CONF.boto.s3_url).hostname is None:
|
||||
raise Exception("Failed to get hostname from the s3_url")
|
||||
s3client = openstack.s3_client
|
||||
try:
|
||||
s3client.get_bucket("^INVALID*#()@INVALID.")
|
||||
except exception.BotoServerError as exc:
|
||||
if exc.status == 403:
|
||||
_cred_sub_check(s3client.connection_data)
|
||||
except Exception as exc:
|
||||
S3_CAN_CONNECT_ERROR = str(exc)
|
||||
except lib_exc.Unauthorized:
|
||||
S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
|
||||
" failed to get them even by keystoneclient"
|
||||
boto_logger.logger.setLevel(level)
|
||||
return {'A_I_IMAGES_READY': A_I_IMAGES_READY,
|
||||
'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
|
||||
'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR}
|
||||
|
||||
|
||||
class BotoExceptionMatcher(object):
|
||||
STATUS_RE = r'[45]\d\d'
|
||||
CODE_RE = '.*' # regexp makes sense in group match
|
||||
|
||||
def match(self, exc):
|
||||
"""Check boto exception
|
||||
|
||||
:returns: Returns with an error string if it does not match,
|
||||
returns with None when it matches.
|
||||
"""
|
||||
if not isinstance(exc, exception.BotoServerError):
|
||||
return "%r not an BotoServerError instance" % exc
|
||||
LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
|
||||
if re.match(self.STATUS_RE, str(exc.status)) is None:
|
||||
return ("Status code (%s) does not match"
|
||||
"the expected re pattern \"%s\""
|
||||
% (exc.status, self.STATUS_RE))
|
||||
if re.match(self.CODE_RE, str(exc.error_code)) is None:
|
||||
return ("Error code (%s) does not match" +
|
||||
"the expected re pattern \"%s\"") %\
|
||||
(exc.error_code, self.CODE_RE)
|
||||
return None
|
||||
|
||||
|
||||
class ClientError(BotoExceptionMatcher):
|
||||
STATUS_RE = r'4\d\d'
|
||||
|
||||
|
||||
class ServerError(BotoExceptionMatcher):
|
||||
STATUS_RE = r'5\d\d'
|
||||
|
||||
|
||||
def _add_matcher_class(error_cls, error_data, base=BotoExceptionMatcher):
|
||||
"""Usable for adding an ExceptionMatcher(s) into the exception tree.
|
||||
|
||||
The not leaf elements does wildcard match
|
||||
"""
|
||||
# in error_code just literal and '.' characters expected
|
||||
if not isinstance(error_data, six.string_types):
|
||||
(error_code, status_code) = map(str, error_data)
|
||||
else:
|
||||
status_code = None
|
||||
error_code = error_data
|
||||
parts = error_code.split('.')
|
||||
basematch = ""
|
||||
num_parts = len(parts)
|
||||
max_index = num_parts - 1
|
||||
add_cls = error_cls
|
||||
for i_part in six.moves.xrange(num_parts):
|
||||
part = parts[i_part]
|
||||
leaf = i_part == max_index
|
||||
if not leaf:
|
||||
match = basematch + part + "[.].*"
|
||||
else:
|
||||
match = basematch + part
|
||||
|
||||
basematch += part + "[.]"
|
||||
if not hasattr(add_cls, part):
|
||||
cls_dict = {"CODE_RE": match}
|
||||
if leaf and status_code is not None:
|
||||
cls_dict["STATUS_RE"] = status_code
|
||||
cls = type(part, (base, ), cls_dict)
|
||||
setattr(add_cls, part, cls())
|
||||
add_cls = cls
|
||||
elif leaf:
|
||||
raise LookupError("Tries to redefine an error code \"%s\"" % part)
|
||||
else:
|
||||
add_cls = getattr(add_cls, part)
|
||||
|
||||
|
||||
# TODO(afazekas): classmethod handling
|
||||
def friendly_function_name_simple(call_able):
|
||||
name = ""
|
||||
if hasattr(call_able, "im_class"):
|
||||
name += call_able.im_class.__name__ + "."
|
||||
name += call_able.__name__
|
||||
return name
|
||||
|
||||
|
||||
def friendly_function_call_str(call_able, *args, **kwargs):
|
||||
string = friendly_function_name_simple(call_able)
|
||||
string += "(" + ", ".join(map(str, args))
|
||||
if len(kwargs):
|
||||
if len(args):
|
||||
string += ", "
|
||||
string += ", ".join("=".join(map(str, (key, value)))
|
||||
for (key, value) in kwargs.items())
|
||||
return string + ")"
|
||||
|
||||
|
||||
class BotoTestCase(tempest.test.BaseTestCase):
|
||||
"""Recommended to use as base class for boto related test."""
|
||||
|
||||
credentials = ['primary']
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(BotoTestCase, cls).skip_checks()
|
||||
if not CONF.compute_feature_enabled.ec2_api:
|
||||
raise cls.skipException("The EC2 API is not available")
|
||||
if not CONF.identity_feature_enabled.api_v2 or \
|
||||
not CONF.identity.auth_version == 'v2':
|
||||
raise cls.skipException("Identity v2 is not available")
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(BotoTestCase, cls).resource_setup()
|
||||
cls.conclusion = decision_maker()
|
||||
# The trash contains cleanup functions and parameters in tuples
|
||||
# (function, *args, **kwargs)
|
||||
cls._resource_trash_bin = {}
|
||||
cls._sequence = -1
|
||||
if (hasattr(cls, "EC2") and
|
||||
cls.conclusion['EC2_CAN_CONNECT_ERROR'] is not None):
|
||||
raise cls.skipException("EC2 " + cls.__name__ + ": " +
|
||||
cls.conclusion['EC2_CAN_CONNECT_ERROR'])
|
||||
if (hasattr(cls, "S3") and
|
||||
cls.conclusion['S3_CAN_CONNECT_ERROR'] is not None):
|
||||
raise cls.skipException("S3 " + cls.__name__ + ": " +
|
||||
cls.conclusion['S3_CAN_CONNECT_ERROR'])
|
||||
|
||||
@classmethod
|
||||
def addResourceCleanUp(cls, function, *args, **kwargs):
|
||||
"""Adds CleanUp callable, used by tearDownClass.
|
||||
|
||||
Recommended to a use (deep)copy on the mutable args.
|
||||
"""
|
||||
cls._sequence = cls._sequence + 1
|
||||
cls._resource_trash_bin[cls._sequence] = (function, args, kwargs)
|
||||
return cls._sequence
|
||||
|
||||
@classmethod
|
||||
def cancelResourceCleanUp(cls, key):
|
||||
"""Cancel Clean up request."""
|
||||
del cls._resource_trash_bin[key]
|
||||
|
||||
# TODO(afazekas): Add "with" context handling
|
||||
def assertBotoError(self, excMatcher, callableObj,
|
||||
*args, **kwargs):
|
||||
"""Example usage:
|
||||
|
||||
self.assertBotoError(self.ec2_error_code.client.
|
||||
InvalidKeyPair.Duplicate,
|
||||
self.client.create_keypair,
|
||||
key_name)
|
||||
"""
|
||||
try:
|
||||
callableObj(*args, **kwargs)
|
||||
except exception.BotoServerError as exc:
|
||||
error_msg = excMatcher.match(exc)
|
||||
if error_msg is not None:
|
||||
raise self.failureException(error_msg)
|
||||
else:
|
||||
raise self.failureException("BotoServerError not raised")
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
"""Calls the callables added by addResourceCleanUp
|
||||
|
||||
when you overwrite this function don't forget to call this too.
|
||||
"""
|
||||
fail_count = 0
|
||||
trash_keys = sorted(cls._resource_trash_bin, reverse=True)
|
||||
for key in trash_keys:
|
||||
(function, pos_args, kw_args) = cls._resource_trash_bin[key]
|
||||
try:
|
||||
func_name = friendly_function_call_str(function, *pos_args,
|
||||
**kw_args)
|
||||
LOG.debug("Cleaning up: %s" % func_name)
|
||||
function(*pos_args, **kw_args)
|
||||
except BaseException:
|
||||
fail_count += 1
|
||||
LOG.exception("Cleanup failed %s" % func_name)
|
||||
finally:
|
||||
del cls._resource_trash_bin[key]
|
||||
super(BotoTestCase, cls).resource_cleanup()
|
||||
# NOTE(afazekas): let the super called even on exceptions
|
||||
# The real exceptions already logged, if the super throws another,
|
||||
# does not causes hidden issues
|
||||
if fail_count:
|
||||
raise exceptions.TearDownException(num=fail_count)
|
||||
|
||||
ec2_error_code = BotoExceptionMatcher()
|
||||
# InsufficientInstanceCapacity can be both server and client error
|
||||
ec2_error_code.server = ServerError()
|
||||
ec2_error_code.client = ClientError()
|
||||
s3_error_code = BotoExceptionMatcher()
|
||||
s3_error_code.server = ServerError()
|
||||
s3_error_code.client = ClientError()
|
||||
valid_image_state = set(('available', 'pending', 'failed'))
|
||||
# NOTE(afazekas): 'paused' is not valid status in EC2, but it does not have
|
||||
# a good mapping, because it uses memory, but not really a running machine
|
||||
valid_instance_state = set(('pending', 'running', 'shutting-down',
|
||||
'terminated', 'stopping', 'stopped', 'paused'))
|
||||
valid_volume_status = set(('creating', 'available', 'in-use',
|
||||
'deleting', 'deleted', 'error'))
|
||||
valid_snapshot_status = set(('pending', 'completed', 'error'))
|
||||
|
||||
gone_set = set(('_GONE',))
|
||||
|
||||
@classmethod
|
||||
def get_lfunction_gone(cls, obj):
|
||||
# NOTE: If the object is instance of a well know type returns back with
|
||||
# with the corresponding function otherwise it assumes the obj itself
|
||||
# is the function.
|
||||
ec = cls.ec2_error_code
|
||||
if isinstance(obj, ec2.instance.Instance):
|
||||
colusure_matcher = ec.client.InvalidInstanceID.NotFound
|
||||
status_attr = "state"
|
||||
elif isinstance(obj, ec2.image.Image):
|
||||
colusure_matcher = ec.client.InvalidAMIID.NotFound
|
||||
status_attr = "state"
|
||||
elif isinstance(obj, ec2.snapshot.Snapshot):
|
||||
colusure_matcher = ec.client.InvalidSnapshot.NotFound
|
||||
status_attr = "status"
|
||||
elif isinstance(obj, ec2.volume.Volume):
|
||||
colusure_matcher = ec.client.InvalidVolume.NotFound
|
||||
status_attr = "status"
|
||||
else:
|
||||
return obj
|
||||
|
||||
def _status():
|
||||
try:
|
||||
obj.update(validate=True)
|
||||
except ValueError:
|
||||
return "_GONE"
|
||||
except exception.EC2ResponseError as exc:
|
||||
if colusure_matcher.match(exc) is None:
|
||||
return "_GONE"
|
||||
else:
|
||||
raise
|
||||
return getattr(obj, status_attr)
|
||||
|
||||
return _status
|
||||
|
||||
def state_wait_gone(self, lfunction, final_set, valid_set):
|
||||
if not isinstance(final_set, set):
|
||||
final_set = set((final_set,))
|
||||
final_set |= self.gone_set
|
||||
lfunction = self.get_lfunction_gone(lfunction)
|
||||
state = wait.state_wait(lfunction, final_set, valid_set)
|
||||
self.assertIn(state, valid_set | self.gone_set)
|
||||
return state
|
||||
|
||||
def waitImageState(self, lfunction, wait_for):
|
||||
return self.state_wait_gone(lfunction, wait_for,
|
||||
self.valid_image_state)
|
||||
|
||||
def waitInstanceState(self, lfunction, wait_for):
|
||||
return self.state_wait_gone(lfunction, wait_for,
|
||||
self.valid_instance_state)
|
||||
|
||||
def waitSnapshotStatus(self, lfunction, wait_for):
|
||||
return self.state_wait_gone(lfunction, wait_for,
|
||||
self.valid_snapshot_status)
|
||||
|
||||
def waitVolumeStatus(self, lfunction, wait_for):
|
||||
return self.state_wait_gone(lfunction, wait_for,
|
||||
self.valid_volume_status)
|
||||
|
||||
def assertImageStateWait(self, lfunction, wait_for):
|
||||
state = self.waitImageState(lfunction, wait_for)
|
||||
self.assertIn(state, wait_for)
|
||||
|
||||
def assertInstanceStateWait(self, lfunction, wait_for):
|
||||
state = self.waitInstanceState(lfunction, wait_for)
|
||||
self.assertIn(state, wait_for)
|
||||
|
||||
def assertVolumeStatusWait(self, lfunction, wait_for):
|
||||
state = self.waitVolumeStatus(lfunction, wait_for)
|
||||
self.assertIn(state, wait_for)
|
||||
|
||||
def assertSnapshotStatusWait(self, lfunction, wait_for):
|
||||
state = self.waitSnapshotStatus(lfunction, wait_for)
|
||||
self.assertIn(state, wait_for)
|
||||
|
||||
def assertAddressDisassociatedWait(self, address):
|
||||
|
||||
def _disassociate():
|
||||
cli = self.ec2_client
|
||||
addresses = cli.get_all_addresses(addresses=(address.public_ip,))
|
||||
if len(addresses) != 1:
|
||||
return "INVALID"
|
||||
if addresses[0].instance_id:
|
||||
LOG.info("%s associated to %s",
|
||||
address.public_ip,
|
||||
addresses[0].instance_id)
|
||||
return "ASSOCIATED"
|
||||
return "DISASSOCIATED"
|
||||
|
||||
state = wait.state_wait(_disassociate, "DISASSOCIATED",
|
||||
set(("ASSOCIATED", "DISASSOCIATED")))
|
||||
self.assertEqual(state, "DISASSOCIATED")
|
||||
|
||||
def assertAddressReleasedWait(self, address):
|
||||
|
||||
def _address_delete():
|
||||
# NOTE(afazekas): the filter gives back IP
|
||||
# even if it is not associated to my tenant
|
||||
if (address.public_ip not in map(lambda a: a.public_ip,
|
||||
self.ec2_client.get_all_addresses())):
|
||||
return "DELETED"
|
||||
return "NOTDELETED"
|
||||
|
||||
state = wait.state_wait(_address_delete, "DELETED")
|
||||
self.assertEqual(state, "DELETED")
|
||||
|
||||
def assertReSearch(self, regexp, string):
|
||||
if re.search(regexp, string) is None:
|
||||
raise self.failureException("regexp: '%s' not found in '%s'" %
|
||||
(regexp, string))
|
||||
|
||||
def assertNotReSearch(self, regexp, string):
|
||||
if re.search(regexp, string) is not None:
|
||||
raise self.failureException("regexp: '%s' found in '%s'" %
|
||||
(regexp, string))
|
||||
|
||||
def assertReMatch(self, regexp, string):
|
||||
if re.match(regexp, string) is None:
|
||||
raise self.failureException("regexp: '%s' not matches on '%s'" %
|
||||
(regexp, string))
|
||||
|
||||
def assertNotReMatch(self, regexp, string):
|
||||
if re.match(regexp, string) is not None:
|
||||
raise self.failureException("regexp: '%s' matches on '%s'" %
|
||||
(regexp, string))
|
||||
|
||||
@classmethod
|
||||
def destroy_bucket(cls, connection_data, bucket):
|
||||
"""Destroys the bucket and its content, just for teardown."""
|
||||
exc_num = 0
|
||||
try:
|
||||
with contextlib.closing(
|
||||
boto.connect_s3(**connection_data)) as conn:
|
||||
if isinstance(bucket, basestring):
|
||||
bucket = conn.lookup(bucket)
|
||||
assert isinstance(bucket, s3.bucket.Bucket)
|
||||
for obj in bucket.list():
|
||||
try:
|
||||
bucket.delete_key(obj.key)
|
||||
obj.close()
|
||||
except BaseException:
|
||||
LOG.exception("Failed to delete key %s " % obj.key)
|
||||
exc_num += 1
|
||||
conn.delete_bucket(bucket)
|
||||
except BaseException:
|
||||
LOG.exception("Failed to destroy bucket %s " % bucket)
|
||||
exc_num += 1
|
||||
if exc_num:
|
||||
raise exceptions.TearDownException(num=exc_num)
|
||||
|
||||
@classmethod
|
||||
def destroy_reservation(cls, reservation):
|
||||
"""Terminate instances in a reservation, just for teardown."""
|
||||
exc_num = 0
|
||||
|
||||
def _instance_state():
|
||||
try:
|
||||
instance.update(validate=True)
|
||||
except ValueError:
|
||||
return "_GONE"
|
||||
except exception.EC2ResponseError as exc:
|
||||
if cls.ec2_error_code.\
|
||||
client.InvalidInstanceID.NotFound.match(exc) is None:
|
||||
return "_GONE"
|
||||
# NOTE(afazekas): incorrect code,
|
||||
# but the resource must be destroyed
|
||||
if exc.error_code == "InstanceNotFound":
|
||||
return "_GONE"
|
||||
|
||||
return instance.state
|
||||
|
||||
for instance in reservation.instances:
|
||||
try:
|
||||
instance.terminate()
|
||||
wait.re_search_wait(_instance_state, "_GONE")
|
||||
except BaseException:
|
||||
LOG.exception("Failed to terminate instance %s " % instance)
|
||||
exc_num += 1
|
||||
if exc_num:
|
||||
raise exceptions.TearDownException(num=exc_num)
|
||||
|
||||
# NOTE(afazekas): The incorrect ErrorCodes makes very, very difficult
|
||||
# to write better teardown
|
||||
|
||||
@classmethod
|
||||
def destroy_security_group_wait(cls, group):
|
||||
"""Delete group.
|
||||
|
||||
Use just for teardown!
|
||||
"""
|
||||
# NOTE(afazekas): should wait/try until all related instance terminates
|
||||
group.delete()
|
||||
|
||||
@classmethod
|
||||
def destroy_volume_wait(cls, volume):
|
||||
"""Delete volume, tries to detach first.
|
||||
|
||||
Use just for teardown!
|
||||
"""
|
||||
exc_num = 0
|
||||
snaps = volume.snapshots()
|
||||
if len(snaps):
|
||||
LOG.critical("%s Volume has %s snapshot(s)", volume.id,
|
||||
map(snaps.id, snaps))
|
||||
|
||||
# NOTE(afazekas): detaching/attaching not valid EC2 status
|
||||
def _volume_state():
|
||||
volume.update(validate=True)
|
||||
try:
|
||||
# NOTE(gmann): Make sure volume is attached.
|
||||
# Checking status as 'not "available"' is not enough to make
|
||||
# sure volume is attached as it can be in "error" state
|
||||
if volume.status == "in-use":
|
||||
volume.detach(force=True)
|
||||
except BaseException:
|
||||
LOG.exception("Failed to detach volume %s" % volume)
|
||||
# exc_num += 1 "nonlocal" not in python2
|
||||
return volume.status
|
||||
|
||||
try:
|
||||
wait.re_search_wait(_volume_state, "available")
|
||||
# not validates status
|
||||
LOG.info(_volume_state())
|
||||
volume.delete()
|
||||
except BaseException:
|
||||
LOG.exception("Failed to delete volume %s" % volume)
|
||||
exc_num += 1
|
||||
if exc_num:
|
||||
raise exceptions.TearDownException(num=exc_num)
|
||||
|
||||
@classmethod
|
||||
def destroy_snapshot_wait(cls, snapshot):
|
||||
"""delete snapshot, wait until it ceases to exist."""
|
||||
snapshot.delete()
|
||||
|
||||
def _update():
|
||||
snapshot.update(validate=True)
|
||||
|
||||
wait.wait_exception(_update)
|
||||
|
||||
|
||||
# you can specify tuples if you want to specify the status pattern
|
||||
for code in ('AddressLimitExceeded', 'AttachmentLimitExceeded', 'AuthFailure',
|
||||
'Blocked', 'CustomerGatewayLimitExceeded', 'DependencyViolation',
|
||||
'DiskImageSizeTooLarge', 'FilterLimitExceeded',
|
||||
'Gateway.NotAttached', 'IdempotentParameterMismatch',
|
||||
'IncorrectInstanceState', 'IncorrectState',
|
||||
'InstanceLimitExceeded', 'InsufficientInstanceCapacity',
|
||||
'InsufficientReservedInstancesCapacity',
|
||||
'InternetGatewayLimitExceeded', 'InvalidAMIAttributeItemValue',
|
||||
'InvalidAMIID.Malformed', 'InvalidAMIID.NotFound',
|
||||
'InvalidAMIID.Unavailable', 'InvalidAssociationID.NotFound',
|
||||
'InvalidAttachment.NotFound', 'InvalidConversionTaskId',
|
||||
'InvalidCustomerGateway.DuplicateIpAddress',
|
||||
'InvalidCustomerGatewayID.NotFound', 'InvalidDevice.InUse',
|
||||
'InvalidDhcpOptionsID.NotFound', 'InvalidFormat',
|
||||
'InvalidFilter', 'InvalidGatewayID.NotFound',
|
||||
'InvalidGroup.Duplicate', 'InvalidGroupId.Malformed',
|
||||
'InvalidGroup.InUse', 'InvalidGroup.NotFound',
|
||||
'InvalidGroup.Reserved', 'InvalidInstanceID.Malformed',
|
||||
'InvalidInstanceID.NotFound',
|
||||
'InvalidInternetGatewayID.NotFound', 'InvalidIPAddress.InUse',
|
||||
'InvalidKeyPair.Duplicate', 'InvalidKeyPair.Format',
|
||||
'InvalidKeyPair.NotFound', 'InvalidManifest',
|
||||
'InvalidNetworkAclEntry.NotFound',
|
||||
'InvalidNetworkAclID.NotFound', 'InvalidParameterCombination',
|
||||
'InvalidParameterValue', 'InvalidPermission.Duplicate',
|
||||
'InvalidPermission.Malformed', 'InvalidReservationID.Malformed',
|
||||
'InvalidReservationID.NotFound', 'InvalidRoute.NotFound',
|
||||
'InvalidRouteTableID.NotFound',
|
||||
'InvalidSecurity.RequestHasExpired',
|
||||
'InvalidSnapshotID.Malformed', 'InvalidSnapshot.NotFound',
|
||||
'InvalidUserID.Malformed', 'InvalidReservedInstancesId',
|
||||
'InvalidReservedInstancesOfferingId',
|
||||
'InvalidSubnetID.NotFound', 'InvalidVolumeID.Duplicate',
|
||||
'InvalidVolumeID.Malformed', 'InvalidVolumeID.ZoneMismatch',
|
||||
'InvalidVolume.NotFound', 'InvalidVpcID.NotFound',
|
||||
'InvalidVpnConnectionID.NotFound',
|
||||
'InvalidVpnGatewayID.NotFound',
|
||||
'InvalidZone.NotFound', 'LegacySecurityGroup',
|
||||
'MissingParameter', 'NetworkAclEntryAlreadyExists',
|
||||
'NetworkAclEntryLimitExceeded', 'NetworkAclLimitExceeded',
|
||||
'NonEBSInstance', 'PendingSnapshotLimitExceeded',
|
||||
'PendingVerification', 'OptInRequired', 'RequestLimitExceeded',
|
||||
'ReservedInstancesLimitExceeded', 'Resource.AlreadyAssociated',
|
||||
'ResourceLimitExceeded', 'RouteAlreadyExists',
|
||||
'RouteLimitExceeded', 'RouteTableLimitExceeded',
|
||||
'RulesPerSecurityGroupLimitExceeded',
|
||||
'SecurityGroupLimitExceeded',
|
||||
'SecurityGroupsPerInstanceLimitExceeded',
|
||||
'SnapshotLimitExceeded', 'SubnetLimitExceeded',
|
||||
'UnknownParameter', 'UnsupportedOperation',
|
||||
'VolumeLimitExceeded', 'VpcLimitExceeded',
|
||||
'VpnConnectionLimitExceeded',
|
||||
'VpnGatewayAttachmentLimitExceeded', 'VpnGatewayLimitExceeded'):
|
||||
_add_matcher_class(BotoTestCase.ec2_error_code.client,
|
||||
code, base=ClientError)
|
||||
|
||||
for code in ('InsufficientAddressCapacity', 'InsufficientInstanceCapacity',
|
||||
'InsufficientReservedInstanceCapacity', 'InternalError',
|
||||
'Unavailable'):
|
||||
_add_matcher_class(BotoTestCase.ec2_error_code.server,
|
||||
code, base=ServerError)
|
||||
|
||||
|
||||
for code in (('AccessDenied', 403),
|
||||
('AccountProblem', 403),
|
||||
('AmbiguousGrantByEmailAddress', 400),
|
||||
('BadDigest', 400),
|
||||
('BucketAlreadyExists', 409),
|
||||
('BucketAlreadyOwnedByYou', 409),
|
||||
('BucketNotEmpty', 409),
|
||||
('CredentialsNotSupported', 400),
|
||||
('CrossLocationLoggingProhibited', 403),
|
||||
('EntityTooSmall', 400),
|
||||
('EntityTooLarge', 400),
|
||||
('ExpiredToken', 400),
|
||||
('IllegalVersioningConfigurationException', 400),
|
||||
('IncompleteBody', 400),
|
||||
('IncorrectNumberOfFilesInPostRequest', 400),
|
||||
('InlineDataTooLarge', 400),
|
||||
('InvalidAccessKeyId', 403),
|
||||
'InvalidAddressingHeader',
|
||||
('InvalidArgument', 400),
|
||||
('InvalidBucketName', 400),
|
||||
('InvalidBucketState', 409),
|
||||
('InvalidDigest', 400),
|
||||
('InvalidLocationConstraint', 400),
|
||||
('InvalidPart', 400),
|
||||
('InvalidPartOrder', 400),
|
||||
('InvalidPayer', 403),
|
||||
('InvalidPolicyDocument', 400),
|
||||
('InvalidRange', 416),
|
||||
('InvalidRequest', 400),
|
||||
('InvalidSecurity', 403),
|
||||
('InvalidSOAPRequest', 400),
|
||||
('InvalidStorageClass', 400),
|
||||
('InvalidTargetBucketForLogging', 400),
|
||||
('InvalidToken', 400),
|
||||
('InvalidURI', 400),
|
||||
('KeyTooLong', 400),
|
||||
('MalformedACLError', 400),
|
||||
('MalformedPOSTRequest', 400),
|
||||
('MalformedXML', 400),
|
||||
('MaxMessageLengthExceeded', 400),
|
||||
('MaxPostPreDataLengthExceededError', 400),
|
||||
('MetadataTooLarge', 400),
|
||||
('MethodNotAllowed', 405),
|
||||
('MissingAttachment'),
|
||||
('MissingContentLength', 411),
|
||||
('MissingRequestBodyError', 400),
|
||||
('MissingSecurityElement', 400),
|
||||
('MissingSecurityHeader', 400),
|
||||
('NoLoggingStatusForKey', 400),
|
||||
('NoSuchBucket', 404),
|
||||
('NoSuchKey', 404),
|
||||
('NoSuchLifecycleConfiguration', 404),
|
||||
('NoSuchUpload', 404),
|
||||
('NoSuchVersion', 404),
|
||||
('NotSignedUp', 403),
|
||||
('NotSuchBucketPolicy', 404),
|
||||
('OperationAborted', 409),
|
||||
('PermanentRedirect', 301),
|
||||
('PreconditionFailed', 412),
|
||||
('Redirect', 307),
|
||||
('RequestIsNotMultiPartContent', 400),
|
||||
('RequestTimeout', 400),
|
||||
('RequestTimeTooSkewed', 403),
|
||||
('RequestTorrentOfBucketError', 400),
|
||||
('SignatureDoesNotMatch', 403),
|
||||
('TemporaryRedirect', 307),
|
||||
('TokenRefreshRequired', 400),
|
||||
('TooManyBuckets', 400),
|
||||
('UnexpectedContent', 400),
|
||||
('UnresolvableGrantByEmailAddress', 400),
|
||||
('UserKeyMustBeSpecified', 400)):
|
||||
_add_matcher_class(BotoTestCase.s3_error_code.client,
|
||||
code, base=ClientError)
|
||||
|
||||
|
||||
for code in (('InternalError', 500),
|
||||
('NotImplemented', 501),
|
||||
('ServiceUnavailable', 503),
|
||||
('SlowDown', 503)):
|
||||
_add_matcher_class(BotoTestCase.s3_error_code.server,
|
||||
code, base=ServerError)
|
363
tempest/thirdparty/boto/test_ec2_instance_run.py
vendored
363
tempest/thirdparty/boto/test_ec2_instance_run.py
vendored
@ -1,363 +0,0 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest.common.utils.linux import remote_client
|
||||
from tempest import config
|
||||
from tempest import exceptions
|
||||
from tempest import test
|
||||
from tempest.thirdparty.boto import test as boto_test
|
||||
from tempest.thirdparty.boto.utils import s3
|
||||
from tempest.thirdparty.boto.utils import wait
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class InstanceRunTest(boto_test.BotoTestCase):
|
||||
|
||||
@classmethod
|
||||
def setup_clients(cls):
|
||||
super(InstanceRunTest, cls).setup_clients()
|
||||
cls.s3_client = cls.os.s3_client
|
||||
cls.ec2_client = cls.os.ec2api_client
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(InstanceRunTest, cls).resource_setup()
|
||||
if not cls.conclusion['A_I_IMAGES_READY']:
|
||||
raise cls.skipException("".join(("EC2 ", cls.__name__,
|
||||
": requires ami/aki/ari manifest")))
|
||||
cls.zone = CONF.boto.aws_zone
|
||||
cls.materials_path = CONF.boto.s3_materials_path
|
||||
ami_manifest = CONF.boto.ami_manifest
|
||||
aki_manifest = CONF.boto.aki_manifest
|
||||
ari_manifest = CONF.boto.ari_manifest
|
||||
cls.instance_type = CONF.boto.instance_type
|
||||
cls.bucket_name = data_utils.rand_name("s3bucket")
|
||||
cls.keypair_name = data_utils.rand_name("keypair")
|
||||
cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
|
||||
cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
|
||||
cls.keypair_name)
|
||||
bucket = cls.s3_client.create_bucket(cls.bucket_name)
|
||||
cls.addResourceCleanUp(cls.destroy_bucket,
|
||||
cls.s3_client.connection_data,
|
||||
cls.bucket_name)
|
||||
s3.s3_upload_dir(bucket, cls.materials_path)
|
||||
cls.images = {"ami":
|
||||
{"name": data_utils.rand_name("ami-name"),
|
||||
"location": cls.bucket_name + "/" + ami_manifest},
|
||||
"aki":
|
||||
{"name": data_utils.rand_name("aki-name"),
|
||||
"location": cls.bucket_name + "/" + aki_manifest},
|
||||
"ari":
|
||||
{"name": data_utils.rand_name("ari-name"),
|
||||
"location": cls.bucket_name + "/" + ari_manifest}}
|
||||
for image_type in ("aki", "ari"):
|
||||
image = cls.images[image_type]
|
||||
image["image_id"] = cls.ec2_client.register_image(
|
||||
name=image["name"],
|
||||
image_location=image["location"])
|
||||
cls.addResourceCleanUp(cls.ec2_client.deregister_image,
|
||||
image["image_id"])
|
||||
image = cls.images["ami"]
|
||||
image["image_id"] = cls.ec2_client.register_image(
|
||||
name=image["name"],
|
||||
image_location=image["location"],
|
||||
kernel_id=cls.images["aki"]["image_id"],
|
||||
ramdisk_id=cls.images["ari"]["image_id"])
|
||||
cls.addResourceCleanUp(cls.ec2_client.deregister_image,
|
||||
image["image_id"])
|
||||
|
||||
for image in cls.images.itervalues():
|
||||
def _state():
|
||||
retr = cls.ec2_client.get_image(image["image_id"])
|
||||
return retr.state
|
||||
state = wait.state_wait(_state, "available")
|
||||
if state != "available":
|
||||
for _image in cls.images.itervalues():
|
||||
cls.ec2_client.deregister_image(_image["image_id"])
|
||||
raise exceptions.EC2RegisterImageException(
|
||||
image_id=image["image_id"])
|
||||
|
||||
def _terminate_reservation(self, reservation, rcuk):
|
||||
for instance in reservation.instances:
|
||||
instance.terminate()
|
||||
for instance in reservation.instances:
|
||||
self.assertInstanceStateWait(instance, '_GONE')
|
||||
self.cancelResourceCleanUp(rcuk)
|
||||
|
||||
@test.idempotent_id('c881fbb7-d56e-4054-9d76-1c3a60a207b0')
|
||||
def test_run_idempotent_instances(self):
|
||||
# EC2 run instances idempotently
|
||||
|
||||
def _run_instance(client_token):
|
||||
reservation = self.ec2_client.run_instances(
|
||||
image_id=self.images["ami"]["image_id"],
|
||||
kernel_id=self.images["aki"]["image_id"],
|
||||
ramdisk_id=self.images["ari"]["image_id"],
|
||||
instance_type=self.instance_type,
|
||||
client_token=client_token)
|
||||
rcuk = self.addResourceCleanUp(self.destroy_reservation,
|
||||
reservation)
|
||||
return (reservation, rcuk)
|
||||
|
||||
reservation_1, rcuk_1 = _run_instance('token_1')
|
||||
reservation_2, rcuk_2 = _run_instance('token_2')
|
||||
reservation_1a, rcuk_1a = _run_instance('token_1')
|
||||
|
||||
self.assertIsNotNone(reservation_1)
|
||||
self.assertIsNotNone(reservation_2)
|
||||
self.assertIsNotNone(reservation_1a)
|
||||
|
||||
# same reservation for token_1
|
||||
self.assertEqual(reservation_1.id, reservation_1a.id)
|
||||
|
||||
# Cancel cleanup -- since it's a duplicate, it's
|
||||
# handled by rcuk1
|
||||
self.cancelResourceCleanUp(rcuk_1a)
|
||||
|
||||
self._terminate_reservation(reservation_1, rcuk_1)
|
||||
self._terminate_reservation(reservation_2, rcuk_2)
|
||||
|
||||
@test.idempotent_id('2ea26a39-f96c-48fc-8374-5c10ec184c67')
|
||||
def test_run_stop_terminate_instance(self):
|
||||
# EC2 run, stop and terminate instance
|
||||
image_ami = self.ec2_client.get_image(self.images["ami"]
|
||||
["image_id"])
|
||||
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
|
||||
ramdisk_id=self.images["ari"]["image_id"],
|
||||
instance_type=self.instance_type)
|
||||
rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
|
||||
|
||||
for instance in reservation.instances:
|
||||
LOG.info("state: %s", instance.state)
|
||||
if instance.state != "running":
|
||||
self.assertInstanceStateWait(instance, "running")
|
||||
|
||||
for instance in reservation.instances:
|
||||
instance.stop()
|
||||
LOG.info("state: %s", instance.state)
|
||||
if instance.state != "stopped":
|
||||
self.assertInstanceStateWait(instance, "stopped")
|
||||
|
||||
self._terminate_reservation(reservation, rcuk)
|
||||
|
||||
@test.idempotent_id('3d77225a-5cec-4e54-a017-9ebf11a266e6')
|
||||
def test_run_stop_terminate_instance_with_tags(self):
|
||||
# EC2 run, stop and terminate instance with tags
|
||||
image_ami = self.ec2_client.get_image(self.images["ami"]
|
||||
["image_id"])
|
||||
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
|
||||
ramdisk_id=self.images["ari"]["image_id"],
|
||||
instance_type=self.instance_type)
|
||||
rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
|
||||
|
||||
for instance in reservation.instances:
|
||||
LOG.info("state: %s", instance.state)
|
||||
if instance.state != "running":
|
||||
self.assertInstanceStateWait(instance, "running")
|
||||
instance.add_tag('key1', value='value1')
|
||||
|
||||
tags = self.ec2_client.get_all_tags()
|
||||
td = dict((item.name, item.value) for item in tags)
|
||||
|
||||
self.assertIn('key1', td)
|
||||
self.assertEqual('value1', td['key1'])
|
||||
|
||||
tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
|
||||
td = dict((item.name, item.value) for item in tags)
|
||||
self.assertIn('key1', td)
|
||||
self.assertEqual('value1', td['key1'])
|
||||
|
||||
tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
|
||||
td = dict((item.name, item.value) for item in tags)
|
||||
self.assertIn('key1', td)
|
||||
self.assertEqual('value1', td['key1'])
|
||||
|
||||
tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
|
||||
td = dict((item.name, item.value) for item in tags)
|
||||
self.assertNotIn('key1', td)
|
||||
|
||||
for instance in reservation.instances:
|
||||
instance.remove_tag('key1', value='value1')
|
||||
|
||||
tags = self.ec2_client.get_all_tags()
|
||||
|
||||
# NOTE: Volume-attach and detach causes metadata (tags) to be created
|
||||
# for the volume. So exclude them while asserting.
|
||||
self.assertNotIn('key1', tags)
|
||||
|
||||
for instance in reservation.instances:
|
||||
instance.stop()
|
||||
LOG.info("state: %s", instance.state)
|
||||
if instance.state != "stopped":
|
||||
self.assertInstanceStateWait(instance, "stopped")
|
||||
|
||||
self._terminate_reservation(reservation, rcuk)
|
||||
|
||||
@test.idempotent_id('252945b5-3294-4fda-ae21-928a42f63f76')
|
||||
def test_run_terminate_instance(self):
|
||||
# EC2 run, terminate immediately
|
||||
image_ami = self.ec2_client.get_image(self.images["ami"]
|
||||
["image_id"])
|
||||
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
|
||||
|