Pull lbaasv1 tempest tests in-tree, use installed neutron for v1/v2 job
Fix the lbaas v2 api job to use the installed neutron and neutron-lbaas in CI. I'm not happy that tox is still in the mix, so expect it to run testr directly in a future commit; the output gets garbled when I switch, and I'd rather get the job fixed first and tackle that second. Also pulls in a subset of tempest, to be used during the migration to tempest-lib. Future test commits need to move us away from the in-tree tempest, and start removing it. Also remove the check_bash check, which was removed from neutron. Change-Id: I2790239b8cd361bc4ac7905b4118f34648b97840
This commit is contained in:
parent
894ae2d195
commit
a4f03623d7
|
@ -2,34 +2,34 @@
|
|||
|
||||
# Devstack gate hook for the neutron-lbaas API jobs.
# Usage: gate_hook.sh [apiv1|apiv2]   (defaults to apiv2)
#
# NOTE(review): the original span interleaved pre- and post-change lines
# from a diff render; this is the reconstructed post-change script.
set -ex

testenv=${1:-"apiv2"}

# Enable the neutron-lbaas devstack plugin so the service is installed
# from source alongside the rest of the devstack deployment.
export DEVSTACK_LOCAL_CONFIG="enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas"

if [ "$testenv" != "apiv1" ]; then
    # Override enabled services, so we can turn on lbaasv2.
    # While we're at it, disable cinder and swift, since we don't need them.
    s=""
    #s+="c-api,c-bak,c-sch,c-vol,"
    s+="ceilometer-acentral,ceilometer-acompute,ceilometer-alarm-evaluator"
    s+=",ceilometer-alarm-notifier,ceilometer-anotification,ceilometer-api"
    s+=",ceilometer-collector"
    #s+=",cinder"
    s+=",dstat"
    s+=",g-api,g-reg"
    s+=",h-api,h-api-cfn,h-api-cw,h-eng"
    s+=",heat"
    s+=",horizon"
    s+=",key"
    s+=",mysql"
    s+=",n-api,n-cond,n-cpu,n-crt,n-obj,n-sch"
    s+=",q-agt,q-dhcp,q-fwaas,q-l3,q-meta,q-metering,q-svc,q-vpn,quantum"
    s+=",q-lbaasv2"
    s+=",rabbit"
    #s+=",s-account,s-container,s-object,s-proxy"
    s+=",sahara"
    s+=",tempest"
    export OVERRIDE_ENABLED_SERVICES="$s"
fi

$BASE/new/devstack-gate/devstack-vm-gate.sh
|
|
@ -6,35 +6,48 @@ NEUTRON_LBAAS_DIR="$BASE/new/neutron-lbaas"
|
|||
# Post-test hook: run the neutron-lbaas tempest API suite and collect results.
# Usage: post_test_hook.sh [apiv1|apiv2]   (defaults to apiv2)
#
# NOTE(review): the original span interleaved pre- and post-change lines
# from a diff render; this is the reconstructed post-change script.
TEMPEST_DIR="$BASE/new/tempest"
SCRIPTS_DIR="/usr/local/jenkins/slave_scripts"

testenv=${1:-"apiv2"}

# Convert the testr repository into a browsable HTML report and ship the
# compressed artifacts to the job's log directory.
function generate_testr_results {
    # Give job user rights to access tox logs
    sudo -H -u $owner chmod o+rw .
    sudo -H -u $owner chmod o+rw -R .testrepository
    if [ -f ".testrepository/0" ] ; then
        subunit-1to2 < .testrepository/0 > ./testrepository.subunit
        python $SCRIPTS_DIR/subunit2html.py ./testrepository.subunit testr_results.html
        gzip -9 ./testrepository.subunit
        gzip -9 ./testr_results.html
        sudo mv ./*.gz /opt/stack/logs/
    fi
}

owner=tempest
# Configure the api tests to use the tempest.conf set by devstack.
sudo cp $TEMPEST_DIR/etc/tempest.conf $NEUTRON_LBAAS_DIR/neutron_lbaas/tests/tempest/etc

# Set owner permissions according to job's requirements.
cd $NEUTRON_LBAAS_DIR
sudo chown -R $owner:stack $NEUTRON_LBAAS_DIR

sudo_env=" OS_TESTR_CONCURRENCY=1"

# NOTE(review): a separating space is required before OS_TEST_PATH, otherwise
# it is appended to the previous assignment's value ("...=1OS_TEST_PATH=...").
if [ "$testenv" = "apiv2" ]; then
    sudo_env+=" OS_TEST_PATH=$NEUTRON_LBAAS_DIR/neutron_lbaas/tests/tempest/v2/api"
elif [ "$testenv" = "apiv1" ]; then
    sudo_env+=" OS_TEST_PATH=$NEUTRON_LBAAS_DIR/neutron_lbaas/tests/tempest/v1/api"
else
    echo "ERROR: unsupported testenv: $testenv"
    exit 1
fi

# Run tests
echo "Running neutron lbaas $testenv test suite"
set +e
sudo -H -u $owner $sudo_env tox -e $testenv
# sudo -H -u $owner $sudo_env testr init
# sudo -H -u $owner $sudo_env testr run

testr_exit_code=$?
set -e
|
||||
|
|
|
@ -0,0 +1,440 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
|
||||
from oslo_log import log as logging
|
||||
from tempest_lib.services.identity.v2.token_client import TokenClientJSON
|
||||
from tempest_lib.services.identity.v3.token_client import V3TokenClientJSON
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import cred_provider
|
||||
from neutron_lbaas.tests.tempest.lib.common import negative_rest_client
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
from neutron_lbaas.tests.tempest.lib import manager
|
||||
# from neutron_lbaas.tests.tempest.lib.services.baremetal.v1.json.baremetal_client import \
|
||||
# BaremetalClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services import botoclients
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.agents_client import \
|
||||
# AgentsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.aggregates_client import \
|
||||
# AggregatesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.availability_zone_client import \
|
||||
# AvailabilityZoneClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.baremetal_nodes_client import \
|
||||
# BaremetalNodesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.certificates_client import \
|
||||
# CertificatesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.extensions_client import \
|
||||
# ExtensionsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.fixed_ips_client import FixedIPsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.flavors_client import FlavorsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.floating_ips_client import \
|
||||
# FloatingIPsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.hosts_client import HostsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.hypervisor_client import \
|
||||
# HypervisorClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.images_client import ImagesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.instance_usage_audit_log_client import \
|
||||
# InstanceUsagesAuditLogClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.interfaces_client import \
|
||||
# InterfacesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.keypairs_client import KeyPairsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.limits_client import LimitsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.migrations_client import \
|
||||
# MigrationsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.networks_client import NetworksClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.quotas_client import QuotaClassesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.quotas_client import QuotasClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.security_group_default_rules_client import \
|
||||
# SecurityGroupDefaultRulesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.security_groups_client import \
|
||||
# SecurityGroupsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.servers_client import ServersClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.services_client import ServicesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.tenant_networks_client import \
|
||||
# TenantNetworksClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.tenant_usages_client import \
|
||||
# TenantUsagesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.compute.json.volumes_extensions_client import \
|
||||
# VolumesExtensionsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.data_processing.v1_1.data_processing_client import \
|
||||
# DataProcessingClient
|
||||
# from neutron_lbaas.tests.tempest.lib.services.database.json.flavors_client import \
|
||||
# DatabaseFlavorsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.database.json.limits_client import \
|
||||
# DatabaseLimitsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.database.json.versions_client import \
|
||||
# DatabaseVersionsClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v2.json.identity_client import \
|
||||
IdentityClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.credentials_client import \
|
||||
CredentialsClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.endpoints_client import \
|
||||
EndPointClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.identity_client import \
|
||||
IdentityV3ClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.policy_client import PolicyClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.region_client import RegionClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.service_client import \
|
||||
ServiceClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.image.v1.json.image_client import ImageClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.image.v2.json.image_client import ImageClientV2JSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.messaging.json.messaging_client import \
|
||||
# MessagingClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.network.json.network_client import NetworkClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.object_storage.account_client import AccountClient
|
||||
# from neutron_lbaas.tests.tempest.lib.services.object_storage.container_client import ContainerClient
|
||||
# from neutron_lbaas.tests.tempest.lib.services.object_storage.object_client import ObjectClient
|
||||
# from neutron_lbaas.tests.tempest.lib.services.orchestration.json.orchestration_client import \
|
||||
# OrchestrationClient
|
||||
# from neutron_lbaas.tests.tempest.lib.services.telemetry.json.telemetry_client import \
|
||||
# TelemetryClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.json.admin.volume_hosts_client import \
|
||||
# VolumeHostsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.json.admin.volume_quotas_client import \
|
||||
# VolumeQuotasClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.json.admin.volume_services_client import \
|
||||
# VolumesServicesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.json.admin.volume_types_client import \
|
||||
# VolumeTypesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.json.availability_zone_client import \
|
||||
# VolumeAvailabilityZoneClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.json.backups_client import BackupsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.json.extensions_client import \
|
||||
# ExtensionsClientJSON as VolumeExtensionClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.json.qos_client import QosSpecsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.json.snapshots_client import SnapshotsClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.json.volumes_client import VolumesClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.admin.volume_hosts_client import \
|
||||
# VolumeHostsV2ClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.admin.volume_quotas_client import \
|
||||
# VolumeQuotasV2Client
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.admin.volume_services_client import \
|
||||
# VolumesServicesV2ClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.admin.volume_types_client import \
|
||||
# VolumeTypesV2ClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.availability_zone_client import \
|
||||
# VolumeV2AvailabilityZoneClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.backups_client import BackupsClientV2JSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.extensions_client import \
|
||||
# ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.snapshots_client import \
|
||||
# SnapshotsV2ClientJSON
|
||||
# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.volumes_client import VolumesV2ClientJSON
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Manager(manager.Manager):

    """
    Top level manager for OpenStack tempest clients

    Only the identity and network clients are wired up in this in-tree
    copy; the remaining tempest service clients (compute, volume, image,
    database, object storage, telemetry, orchestration, data processing,
    baremetal, messaging, EC2/S3) were carried over commented-out and are
    intentionally omitted here while lbaas migrates to tempest-lib.
    """

    # Common keyword arguments shared by every REST client we build.
    default_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests
    }

    # NOTE: Tempest uses timeout values of compute API if project specific
    # timeout values don't exist.
    default_params_with_timeout_values = {
        'build_interval': CONF.compute.build_interval,
        'build_timeout': CONF.compute.build_timeout
    }
    default_params_with_timeout_values.update(default_params)

    def __init__(self, credentials=None, service=None):
        super(Manager, self).__init__(credentials=credentials)

        self._set_identity_clients()

        self.network_client = NetworkClientJSON(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)

    def _set_identity_clients(self):
        # Keystone clients always talk to the admin endpoint and use the
        # compute-derived timeout defaults.
        params = {
            'service': CONF.identity.catalog_type,
            'region': CONF.identity.region,
            'endpoint_type': 'adminURL'
        }
        params.update(self.default_params_with_timeout_values)

        self.identity_client = IdentityClientJSON(self.auth_provider,
                                                  **params)
        self.identity_v3_client = IdentityV3ClientJSON(self.auth_provider,
                                                       **params)
        self.endpoints_client = EndPointClientJSON(self.auth_provider,
                                                   **params)
        self.service_client = ServiceClientJSON(self.auth_provider, **params)
        self.policy_client = PolicyClientJSON(self.auth_provider, **params)
        self.region_client = RegionClientJSON(self.auth_provider, **params)
        self.credentials_client = CredentialsClientJSON(self.auth_provider,
                                                        **params)
        # Token clients do not use the catalog. They only need default_params.
        # They read auth_url, so they should only be set if the corresponding
        # API version is marked as enabled
        if CONF.identity_feature_enabled.api_v2:
            if CONF.identity.uri:
                self.token_client = TokenClientJSON(
                    CONF.identity.uri, **self.default_params)
            else:
                msg = 'Identity v2 API enabled, but no identity.uri set'
                raise exceptions.InvalidConfiguration(msg)
        if CONF.identity_feature_enabled.api_v3:
            if CONF.identity.uri_v3:
                self.token_v3_client = V3TokenClientJSON(
                    CONF.identity.uri_v3, **self.default_params)
            else:
                msg = 'Identity v3 API enabled, but no identity.uri_v3 set'
                raise exceptions.InvalidConfiguration(msg)
||||
class AdminManager(Manager):

    """
    Manager object that uses the admin credentials for its
    managed client objects
    """

    def __init__(self, service=None):
        # Resolve the configured admin credentials and delegate all client
        # construction to the base Manager.
        admin_creds = cred_provider.get_configured_credentials(
            'identity_admin')
        super(AdminManager, self).__init__(credentials=admin_creds,
                                           service=service)
|
@ -0,0 +1,355 @@
|
|||
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import hashlib
|
||||
import os
|
||||
|
||||
from oslo_concurrency import lockutils
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
import yaml
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import clients
|
||||
from neutron_lbaas.tests.tempest.lib.common import cred_provider
|
||||
from neutron_lbaas.tests.tempest.lib.common import fixed_network
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def read_accounts_yaml(path):
    """Load the test accounts definitions from a YAML file.

    :param path: filesystem path to the accounts file
    :returns: the parsed YAML structure (typically a list of account dicts)
    """
    # Use a context manager so the handle is closed even if parsing fails
    # (the original leaked the open file), and safe_load so a crafted
    # accounts file cannot instantiate arbitrary python objects.
    with open(path, 'r') as yaml_file:
        return yaml.safe_load(yaml_file)
|
||||
|
||||
class Accounts(cred_provider.CredentialProvider):
|
||||
|
||||
def __init__(self, identity_version=None, name=None):
|
||||
super(Accounts, self).__init__(identity_version=identity_version,
|
||||
name=name)
|
||||
if (CONF.auth.test_accounts_file and
|
||||
os.path.isfile(CONF.auth.test_accounts_file)):
|
||||
accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
|
||||
self.use_default_creds = False
|
||||
else:
|
||||
accounts = {}
|
||||
self.use_default_creds = True
|
||||
self.hash_dict = self.get_hash_dict(accounts)
|
||||
self.accounts_dir = os.path.join(lockutils.get_lock_path(CONF),
|
||||
'test_accounts')
|
||||
self.isolated_creds = {}
|
||||
|
||||
@classmethod
|
||||
def _append_role(cls, role, account_hash, hash_dict):
|
||||
if role in hash_dict['roles']:
|
||||
hash_dict['roles'][role].append(account_hash)
|
||||
else:
|
||||
hash_dict['roles'][role] = [account_hash]
|
||||
return hash_dict
|
||||
|
||||
@classmethod
|
||||
def get_hash_dict(cls, accounts):
|
||||
hash_dict = {'roles': {}, 'creds': {}, 'networks': {}}
|
||||
# Loop over the accounts read from the yaml file
|
||||
for account in accounts:
|
||||
roles = []
|
||||
types = []
|
||||
resources = []
|
||||
if 'roles' in account:
|
||||
roles = account.pop('roles')
|
||||
if 'types' in account:
|
||||
types = account.pop('types')
|
||||
if 'resources' in account:
|
||||
resources = account.pop('resources')
|
||||
temp_hash = hashlib.md5()
|
||||
temp_hash.update(six.text_type(account).encode('utf-8'))
|
||||
temp_hash_key = temp_hash.hexdigest()
|
||||
hash_dict['creds'][temp_hash_key] = account
|
||||
for role in roles:
|
||||
hash_dict = cls._append_role(role, temp_hash_key,
|
||||
hash_dict)
|
||||
# If types are set for the account append the matching role
|
||||
# subdict with the hash
|
||||
for type in types:
|
||||
if type == 'admin':
|
||||
hash_dict = cls._append_role(CONF.identity.admin_role,
|
||||
temp_hash_key, hash_dict)
|
||||
elif type == 'operator':
|
||||
hash_dict = cls._append_role(
|
||||
CONF.object_storage.operator_role, temp_hash_key,
|
||||
hash_dict)
|
||||
elif type == 'reseller_admin':
|
||||
hash_dict = cls._append_role(
|
||||
CONF.object_storage.reseller_admin_role,
|
||||
temp_hash_key,
|
||||
hash_dict)
|
||||
# Populate the network subdict
|
||||
for resource in resources:
|
||||
if resource == 'network':
|
||||
hash_dict['networks'][temp_hash_key] = resources[resource]
|
||||
else:
|
||||
LOG.warning('Unkown resource type %s, ignoring this field'
|
||||
% resource)
|
||||
return hash_dict
|
||||
|
||||
def is_multi_user(self):
|
||||
# Default credentials is not a valid option with locking Account
|
||||
if self.use_default_creds:
|
||||
raise exceptions.InvalidConfiguration(
|
||||
"Account file %s doesn't exist" % CONF.auth.test_accounts_file)
|
||||
else:
|
||||
return len(self.hash_dict['creds']) > 1
|
||||
|
||||
def is_multi_tenant(self):
|
||||
return self.is_multi_user()
|
||||
|
||||
def _create_hash_file(self, hash_string):
|
||||
path = os.path.join(os.path.join(self.accounts_dir, hash_string))
|
||||
if not os.path.isfile(path):
|
||||
with open(path, 'w') as fd:
|
||||
fd.write(self.name)
|
||||
return True
|
||||
return False
|
||||
|
||||
@lockutils.synchronized('test_accounts_io', external=True)
|
||||
def _get_free_hash(self, hashes):
|
||||
# Cast as a list because in some edge cases a set will be passed in
|
||||
hashes = list(hashes)
|
||||
if not os.path.isdir(self.accounts_dir):
|
||||
os.mkdir(self.accounts_dir)
|
||||
# Create File from first hash (since none are in use)
|
||||
self._create_hash_file(hashes[0])
|
||||
return hashes[0]
|
||||
names = []
|
||||
for _hash in hashes:
|
||||
res = self._create_hash_file(_hash)
|
||||
if res:
|
||||
return _hash
|
||||
else:
|
||||
path = os.path.join(os.path.join(self.accounts_dir,
|
||||
_hash))
|
||||
with open(path, 'r') as fd:
|
||||
names.append(fd.read())
|
||||
msg = ('Insufficient number of users provided. %s have allocated all '
|
||||
'the credentials for this allocation request' % ','.join(names))
|
||||
raise exceptions.InvalidConfiguration(msg)
|
||||
|
||||
def _get_match_hash_list(self, roles=None):
|
||||
hashes = []
|
||||
if roles:
|
||||
# Loop over all the creds for each role in the subdict and generate
|
||||
# a list of cred lists for each role
|
||||
for role in roles:
|
||||
temp_hashes = self.hash_dict['roles'].get(role, None)
|
||||
if not temp_hashes:
|
||||
raise exceptions.InvalidConfiguration(
|
||||
"No credentials with role: %s specified in the "
|
||||
"accounts ""file" % role)
|
||||
hashes.append(temp_hashes)
|
||||
# Take the list of lists and do a boolean and between each list to
|
||||
# find the creds which fall under all the specified roles
|
||||
temp_list = set(hashes[0])
|
||||
for hash_list in hashes[1:]:
|
||||
temp_list = temp_list & set(hash_list)
|
||||
hashes = temp_list
|
||||
else:
|
||||
hashes = self.hash_dict['creds'].keys()
|
||||
# NOTE(mtreinish): admin is a special case because of the increased
|
||||
# privlege set which could potentially cause issues on tests where that
|
||||
# is not expected. So unless the admin role isn't specified do not
|
||||
# allocate admin.
|
||||
admin_hashes = self.hash_dict['roles'].get(CONF.identity.admin_role,
|
||||
None)
|
||||
if ((not roles or CONF.identity.admin_role not in roles) and
|
||||
admin_hashes):
|
||||
useable_hashes = [x for x in hashes if x not in admin_hashes]
|
||||
else:
|
||||
useable_hashes = hashes
|
||||
return useable_hashes
|
||||
|
||||
def _sanitize_creds(self, creds):
|
||||
temp_creds = creds.copy()
|
||||
temp_creds.pop('password')
|
||||
return temp_creds
|
||||
|
||||
def _get_creds(self, roles=None):
|
||||
if self.use_default_creds:
|
||||
raise exceptions.InvalidConfiguration(
|
||||
"Account file %s doesn't exist" % CONF.auth.test_accounts_file)
|
||||
useable_hashes = self._get_match_hash_list(roles)
|
||||
free_hash = self._get_free_hash(useable_hashes)
|
||||
clean_creds = self._sanitize_creds(
|
||||
self.hash_dict['creds'][free_hash])
|
||||
LOG.info('%s allocated creds:\n%s' % (self.name, clean_creds))
|
||||
return self._wrap_creds_with_network(free_hash)
|
||||
|
||||
@lockutils.synchronized('test_accounts_io', external=True)
|
||||
def remove_hash(self, hash_string):
|
||||
hash_path = os.path.join(self.accounts_dir, hash_string)
|
||||
if not os.path.isfile(hash_path):
|
||||
LOG.warning('Expected an account lock file %s to remove, but '
|
||||
'one did not exist' % hash_path)
|
||||
else:
|
||||
os.remove(hash_path)
|
||||
if not os.listdir(self.accounts_dir):
|
||||
os.rmdir(self.accounts_dir)
|
||||
|
||||
def get_hash(self, creds):
|
||||
for _hash in self.hash_dict['creds']:
|
||||
# Comparing on the attributes that are expected in the YAML
|
||||
init_attributes = creds.get_init_attributes()
|
||||
hash_attributes = self.hash_dict['creds'][_hash].copy()
|
||||
if ('user_domain_name' in init_attributes and 'user_domain_name'
|
||||
not in hash_attributes):
|
||||
# Allow for the case of domain_name populated from config
|
||||
domain_name = CONF.identity.admin_domain_name
|
||||
hash_attributes['user_domain_name'] = domain_name
|
||||
if all([getattr(creds, k) == hash_attributes[k] for
|
||||
k in init_attributes]):
|
||||
return _hash
|
||||
raise AttributeError('Invalid credentials %s' % creds)
|
||||
|
||||
def remove_credentials(self, creds):
|
||||
_hash = self.get_hash(creds)
|
||||
clean_creds = self._sanitize_creds(self.hash_dict['creds'][_hash])
|
||||
self.remove_hash(_hash)
|
||||
LOG.info("%s returned allocated creds:\n%s" % (self.name, clean_creds))
|
||||
|
||||
def get_primary_creds(self):
|
||||
if self.isolated_creds.get('primary'):
|
||||
return self.isolated_creds.get('primary')
|
||||
net_creds = self._get_creds()
|
||||
self.isolated_creds['primary'] = net_creds
|
||||
return net_creds
|
||||
|
||||
def get_alt_creds(self):
|
||||
if self.isolated_creds.get('alt'):
|
||||
return self.isolated_creds.get('alt')
|
||||
net_creds = self._get_creds()
|
||||
self.isolated_creds['alt'] = net_creds
|
||||
return net_creds
|
||||
|
||||
def get_creds_by_roles(self, roles, force_new=False):
|
||||
roles = list(set(roles))
|
||||
exist_creds = self.isolated_creds.get(six.text_type(roles).encode(
|
||||
'utf-8'), None)
|
||||
# The force kwarg is used to allocate an additional set of creds with
|
||||
# the same role list. The index used for the previously allocation
|
||||
# in the isolated_creds dict will be moved.
|
||||
if exist_creds and not force_new:
|
||||
return exist_creds
|
||||
elif exist_creds and force_new:
|
||||
new_index = six.text_type(roles).encode('utf-8') + '-' + \
|
||||
six.text_type(len(self.isolated_creds)).encode('utf-8')
|
||||
self.isolated_creds[new_index] = exist_creds
|
||||
net_creds = self._get_creds(roles=roles)
|
||||
self.isolated_creds[six.text_type(roles).encode('utf-8')] = net_creds
|
||||
return net_creds
|
||||
|
||||
def clear_isolated_creds(self):
|
||||
for creds in self.isolated_creds.values():
|
||||
self.remove_credentials(creds)
|
||||
|
||||
def get_admin_creds(self):
|
||||
return self.get_creds_by_roles([CONF.identity.admin_role])
|
||||
|
||||
def is_role_available(self, role):
|
||||
if self.use_default_creds:
|
||||
return False
|
||||
else:
|
||||
if self.hash_dict['roles'].get(role):
|
||||
return True
|
||||
return False
|
||||
|
||||
def admin_available(self):
|
||||
return self.is_role_available(CONF.identity.admin_role)
|
||||
|
||||
def _wrap_creds_with_network(self, hash):
|
||||
creds_dict = self.hash_dict['creds'][hash]
|
||||
credential = cred_provider.get_credentials(
|
||||
identity_version=self.identity_version, **creds_dict)
|
||||
net_creds = cred_provider.TestResources(credential)
|
||||
net_clients = clients.Manager(credentials=credential)
|
||||
compute_network_client = net_clients.networks_client
|
||||
net_name = self.hash_dict['networks'].get(hash, None)
|
||||
try:
|
||||
network = fixed_network.get_network_from_name(
|
||||
net_name, compute_network_client)
|
||||
except exceptions.InvalidConfiguration:
|
||||
network = {}
|
||||
net_creds.set_resources(network=network)
|
||||
return net_creds
|
||||
|
||||
|
||||
class NotLockingAccounts(Accounts):
|
||||
"""Credentials provider which always returns the first and second
|
||||
configured accounts as primary and alt users.
|
||||
This credential provider can be used in case of serial test execution
|
||||
to preserve the current behaviour of the serial tempest run.
|
||||
"""
|
||||
|
||||
def _unique_creds(self, cred_arg=None):
|
||||
"""Verify that the configured credentials are valid and distinct """
|
||||
try:
|
||||
user = self.get_primary_creds()
|
||||
alt_user = self.get_alt_creds()
|
||||
return getattr(user, cred_arg) != getattr(alt_user, cred_arg)
|
||||
except exceptions.InvalidCredentials as ic:
|
||||
msg = "At least one of the configured credentials is " \
|
||||
"not valid: %s" % ic.message
|
||||
raise exceptions.InvalidConfiguration(msg)
|
||||
|
||||
def is_multi_user(self):
|
||||
return self._unique_creds('username')
|
||||
|
||||
def is_multi_tenant(self):
|
||||
return self._unique_creds('tenant_id')
|
||||
|
||||
def get_primary_creds(self):
|
||||
if self.isolated_creds.get('primary'):
|
||||
return self.isolated_creds.get('primary')
|
||||
primary_credential = cred_provider.get_configured_credentials(
|
||||
credential_type='user', identity_version=self.identity_version)
|
||||
self.isolated_creds['primary'] = cred_provider.TestResources(
|
||||
primary_credential)
|
||||
return self.isolated_creds['primary']
|
||||
|
||||
def get_alt_creds(self):
|
||||
if self.isolated_creds.get('alt'):
|
||||
return self.isolated_creds.get('alt')
|
||||
alt_credential = cred_provider.get_configured_credentials(
|
||||
credential_type='alt_user',
|
||||
identity_version=self.identity_version)
|
||||
self.isolated_creds['alt'] = cred_provider.TestResources(
|
||||
alt_credential)
|
||||
return self.isolated_creds['alt']
|
||||
|
||||
def clear_isolated_creds(self):
|
||||
self.isolated_creds = {}
|
||||
|
||||
def get_admin_creds(self):
|
||||
creds = cred_provider.get_configured_credentials(
|
||||
"identity_admin", fill_in=False)
|
||||
self.isolated_creds['admin'] = cred_provider.TestResources(creds)
|
||||
return self.isolated_creds['admin']
|
||||
|
||||
def get_creds_by_roles(self, roles, force_new=False):
|
||||
msg = "Credentials being specified through the config file can not be"\
|
||||
" used with tests that specify using credentials by roles. "\
|
||||
"Either exclude/skip the tests doing this or use either an "\
|
||||
"test_accounts_file or tenant isolation."
|
||||
raise exceptions.InvalidConfiguration(msg)
|
|
@ -0,0 +1,39 @@
|
|||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import shlex
|
||||
import subprocess
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def copy_file_to_host(file_from, dest, host, username, pkey):
|
||||
dest = "%s@%s:%s" % (username, host, dest)
|
||||
cmd = "scp -v -o UserKnownHostsFile=/dev/null " \
|
||||
"-o StrictHostKeyChecking=no " \
|
||||
"-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
|
||||
'file1': file_from,
|
||||
'dest': dest}
|
||||
args = shlex.split(cmd.encode('utf-8'))
|
||||
subprocess_args = {'stdout': subprocess.PIPE,
|
||||
'stderr': subprocess.STDOUT}
|
||||
proc = subprocess.Popen(args, **subprocess_args)
|
||||
stdout, stderr = proc.communicate()
|
||||
if proc.returncode != 0:
|
||||
LOG.error(("Command {0} returned with exit status {1},"
|
||||
"output {2}, error {3}").format(cmd, proc.returncode,
|
||||
stdout, stderr))
|
||||
return stdout
|
|
@ -0,0 +1,173 @@
|
|||
# Copyright (c) 2014 Deutsche Telekom AG
|
||||
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import abc
|
||||
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
from tempest_lib import auth
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Type of credentials available from configuration
|
||||
CREDENTIAL_TYPES = {
|
||||
'identity_admin': ('identity', 'admin'),
|
||||
'user': ('identity', None),
|
||||
'alt_user': ('identity', 'alt')
|
||||
}
|
||||
|
||||
DEFAULT_PARAMS = {
|
||||
'disable_ssl_certificate_validation':
|
||||
CONF.identity.disable_ssl_certificate_validation,
|
||||
'ca_certs': CONF.identity.ca_certificates_file,
|
||||
'trace_requests': CONF.debug.trace_requests
|
||||
}
|
||||
|
||||
|
||||
# Read credentials from configuration, builds a Credentials object
|
||||
# based on the specified or configured version
|
||||
def get_configured_credentials(credential_type, fill_in=True,
|
||||
identity_version=None):
|
||||
identity_version = identity_version or CONF.identity.auth_version
|
||||
if identity_version not in ('v2', 'v3'):
|
||||
raise exceptions.InvalidConfiguration(
|
||||
'Unsupported auth version: %s' % identity_version)
|
||||
if credential_type not in CREDENTIAL_TYPES:
|
||||
raise exceptions.InvalidCredentials()
|
||||
conf_attributes = ['username', 'password', 'tenant_name']
|
||||
if identity_version == 'v3':
|
||||
conf_attributes.append('domain_name')
|
||||
# Read the parts of credentials from config
|
||||
params = DEFAULT_PARAMS.copy()
|
||||
section, prefix = CREDENTIAL_TYPES[credential_type]
|
||||
for attr in conf_attributes:
|
||||
_section = getattr(CONF, section)
|
||||
if prefix is None:
|
||||
params[attr] = getattr(_section, attr)
|
||||
else:
|
||||
params[attr] = getattr(_section, prefix + "_" + attr)
|
||||
# Build and validate credentials. We are reading configured credentials,
|
||||
# so validate them even if fill_in is False
|
||||
credentials = get_credentials(fill_in=fill_in,
|
||||
identity_version=identity_version, **params)
|
||||
if not fill_in:
|
||||
if not credentials.is_valid():
|
||||
msg = ("The %s credentials are incorrectly set in the config file."
|
||||
" Double check that all required values are assigned" %
|
||||
credential_type)
|
||||
raise exceptions.InvalidConfiguration(msg)
|
||||
return credentials
|
||||
|
||||
|
||||
# Wrapper around auth.get_credentials to use the configured identity version
|
||||
# is none is specified
|
||||
def get_credentials(fill_in=True, identity_version=None, **kwargs):
|
||||
params = dict(DEFAULT_PARAMS, **kwargs)
|
||||
identity_version = identity_version or CONF.identity.auth_version
|
||||
# In case of "v3" add the domain from config if not specified
|
||||
if identity_version == 'v3':
|
||||
domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
|
||||
if 'domain' in x)
|
||||
if not domain_fields.intersection(kwargs.keys()):
|
||||
# TODO(andreaf) It might be better here to use a dedicated config
|
||||
# option such as CONF.auth.tenant_isolation_domain_name
|
||||
params['user_domain_name'] = CONF.identity.admin_domain_name
|
||||
auth_url = CONF.identity.uri_v3
|
||||
else:
|
||||
auth_url = CONF.identity.uri
|
||||
return auth.get_credentials(auth_url,
|
||||
fill_in=fill_in,
|
||||
identity_version=identity_version,
|
||||
**params)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class CredentialProvider(object):
|
||||
def __init__(self, identity_version=None, name=None, password='pass',
|
||||
network_resources=None):
|
||||
"""A CredentialProvider supplies credentials to test classes.
|
||||
:param identity_version If specified it will return credentials of the
|
||||
corresponding identity version, otherwise it
|
||||
uses auth_version from configuration
|
||||
:param name Name of the calling test. Included in provisioned
|
||||
credentials when credentials are provisioned on the fly
|
||||
:param password Used for provisioned credentials when credentials are
|
||||
provisioned on the fly
|
||||
:param network_resources Network resources required for the credentials
|
||||
"""
|
||||
# TODO(andreaf) name and password are tenant isolation specific, and
|
||||
# could be removed from this abstract class
|
||||
self.name = name or "test_creds"
|
||||
self.identity_version = identity_version or CONF.identity.auth_version
|
||||
if not auth.is_identity_version_supported(self.identity_version):
|
||||
raise exceptions.InvalidIdentityVersion(
|
||||
identity_version=self.identity_version)
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_primary_creds(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_admin_creds(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_alt_creds(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def clear_isolated_creds(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def is_multi_user(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def is_multi_tenant(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_creds_by_roles(self, roles, force_new=False):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def is_role_available(self, role):
|
||||
return
|
||||
|
||||
|
||||
class TestResources(object):
|
||||
"""Readonly Credentials, with network resources added."""
|
||||
|
||||
def __init__(self, credentials):
|
||||
self._credentials = credentials
|
||||
self.network = None
|
||||
self.subnet = None
|
||||
self.router = None
|
||||
|
||||
def __getattr__(self, item):
|
||||
return getattr(self._credentials, item)
|
||||
|
||||
def set_resources(self, **kwargs):
|
||||
for key in kwargs.keys():
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
@property
|
||||
def credentials(self):
|
||||
return self._credentials
|
|
@ -0,0 +1,93 @@
|
|||
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import accounts
|
||||
from neutron_lbaas.tests.tempest.lib.common import cred_provider
|
||||
from neutron_lbaas.tests.tempest.lib.common import isolated_creds
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
# Return the right implementation of CredentialProvider based on config
|
||||
# Dropping interface and password, as they are never used anyways
|
||||
# TODO(andreaf) Drop them from the CredentialsProvider interface completely
|
||||
def get_isolated_credentials(name, network_resources=None,
|
||||
force_tenant_isolation=False,
|
||||
identity_version=None):
|
||||
# If a test requires a new account to work, it can have it via forcing
|
||||
# tenant isolation. A new account will be produced only for that test.
|
||||
# In case admin credentials are not available for the account creation,
|
||||
# the test should be skipped else it would fail.
|
||||
if CONF.auth.allow_tenant_isolation or force_tenant_isolation:
|
||||
return isolated_creds.IsolatedCreds(
|
||||
name=name,
|
||||
network_resources=network_resources,
|
||||
identity_version=identity_version)
|
||||
else:
|
||||
if (CONF.auth.test_accounts_file and
|
||||
os.path.isfile(CONF.auth.test_accounts_file)):
|
||||
# Most params are not relevant for pre-created accounts
|
||||
return accounts.Accounts(name=name,
|
||||
identity_version=identity_version)
|
||||
else:
|
||||
return accounts.NotLockingAccounts(
|
||||
name=name, identity_version=identity_version)
|
||||
|
||||
|
||||
# We want a helper function here to check and see if admin credentials
|
||||
# are available so we can do a single call from skip_checks if admin
|
||||
# creds area vailable.
|
||||
def is_admin_available():
|
||||
is_admin = True
|
||||
# If tenant isolation is enabled admin will be available
|
||||
if CONF.auth.allow_tenant_isolation:
|
||||
return is_admin
|
||||
# Check whether test accounts file has the admin specified or not
|
||||
elif (CONF.auth.test_accounts_file and
|
||||
os.path.isfile(CONF.auth.test_accounts_file)):
|
||||
check_accounts = accounts.Accounts(name='check_admin')
|
||||
if not check_accounts.admin_available():
|
||||
is_admin = False
|
||||
else:
|
||||
try:
|
||||
cred_provider.get_configured_credentials('identity_admin',
|
||||
fill_in=False)
|
||||
except exceptions.InvalidConfiguration:
|
||||
is_admin = False
|
||||
return is_admin
|
||||
|
||||
|
||||
# We want a helper function here to check and see if alt credentials
|
||||
# are available so we can do a single call from skip_checks if alt
|
||||
# creds area vailable.
|
||||
def is_alt_available():
|
||||
# If tenant isolation is enabled admin will be available
|
||||
if CONF.auth.allow_tenant_isolation:
|
||||
return True
|
||||
# Check whether test accounts file has the admin specified or not
|
||||
if (CONF.auth.test_accounts_file and
|
||||
os.path.isfile(CONF.auth.test_accounts_file)):
|
||||
check_accounts = accounts.Accounts(name='check_alt')
|
||||
else:
|
||||
check_accounts = accounts.NotLockingAccounts(name='check_alt')
|
||||
try:
|
||||
if not check_accounts.is_multi_user():
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
except exceptions.InvalidConfiguration:
|
||||
return False
|
|
@ -0,0 +1,227 @@
|
|||
# Copyright 2013 NTT Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
|
||||
import six
|
||||
from testtools import helpers
|
||||
|
||||
|
||||
class ExistsAllResponseHeaders(object):
|
||||
"""
|
||||
Specific matcher to check the existence of Swift's response headers
|
||||
|
||||
This matcher checks the existence of common headers for each HTTP method
|
||||
or the target, which means account, container or object.
|
||||
When checking the existence of 'specific' headers such as
|
||||
X-Account-Meta-* or X-Object-Manifest for example, those headers must be
|
||||
checked in each test code.
|
||||
"""
|
||||
|
||||
def __init__(self, target, method):
|
||||
"""
|
||||
param: target Account/Container/Object
|
||||
param: method PUT/GET/HEAD/DELETE/COPY/POST
|
||||
"""
|
||||
self.target = target
|
||||
self.method = method
|
||||
|
||||
def match(self, actual):
|
||||
"""
|
||||
param: actual HTTP response headers
|
||||
"""
|
||||
# Check common headers for all HTTP methods
|
||||
if 'content-length' not in actual:
|
||||
return NonExistentHeader('content-length')
|
||||
if 'content-type' not in actual:
|
||||
return NonExistentHeader('content-type')
|
||||
if 'x-trans-id' not in actual:
|
||||
return NonExistentHeader('x-trans-id')
|
||||
if 'date' not in actual:
|
||||
return NonExistentHeader('date')
|
||||
|
||||
# Check headers for a specific method or target
|
||||
if self.method == 'GET' or self.method == 'HEAD':
|
||||
if 'x-timestamp' not in actual:
|
||||
return NonExistentHeader('x-timestamp')
|
||||
if 'accept-ranges' not in actual:
|
||||
return NonExistentHeader('accept-ranges')
|
||||
if self.target == 'Account':
|
||||
if 'x-account-bytes-used' not in actual:
|
||||
return NonExistentHeader('x-account-bytes-used')
|
||||
if 'x-account-container-count' not in actual:
|
||||
return NonExistentHeader('x-account-container-count')
|
||||
if 'x-account-object-count' not in actual:
|
||||
return NonExistentHeader('x-account-object-count')
|
||||
elif self.target == 'Container':
|
||||
if 'x-container-bytes-used' not in actual:
|
||||
return NonExistentHeader('x-container-bytes-used')
|
||||
if 'x-container-object-count' not in actual:
|
||||
return NonExistentHeader('x-container-object-count')
|
||||
elif self.target == 'Object':
|
||||
if 'etag' not in actual:
|
||||
return NonExistentHeader('etag')
|
||||
if 'last-modified' not in actual:
|
||||
return NonExistentHeader('last-modified')
|
||||
elif self.method == 'PUT':
|
||||
if self.target == 'Object':
|
||||
if 'etag' not in actual:
|
||||
return NonExistentHeader('etag')
|
||||
if 'last-modified' not in actual:
|
||||
return NonExistentHeader('last-modified')
|
||||
elif self.method == 'COPY':
|
||||
if self.target == 'Object':
|
||||
if 'etag' not in actual:
|
||||
return NonExistentHeader('etag')
|
||||
if 'last-modified' not in actual:
|
||||
return NonExistentHeader('last-modified')
|
||||
if 'x-copied-from' not in actual:
|
||||
return NonExistentHeader('x-copied-from')
|
||||
if 'x-copied-from-last-modified' not in actual:
|
||||
return NonExistentHeader('x-copied-from-last-modified')
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class NonExistentHeader(object):
|
||||
"""
|
||||
Informs an error message for end users in the case of missing a
|
||||
certain header in Swift's responses
|
||||
"""
|
||||
|
||||
def __init__(self, header):
|
||||
self.header = header
|
||||
|
||||
def describe(self):
|
||||
return "%s header does not exist" % self.header
|
||||
|
||||
def get_details(self):
|
||||
return {}
|
||||
|
||||
|
||||
class AreAllWellFormatted(object):
|
||||
"""
|
||||
Specific matcher to check the correctness of formats of values of Swift's
|
||||
response headers
|
||||
|
||||
This matcher checks the format of values of response headers.
|
||||
When checking the format of values of 'specific' headers such as
|
||||
X-Account-Meta-* or X-Object-Manifest for example, those values must be
|
||||
checked in each test code.
|
||||
"""
|
||||
|
||||
def match(self, actual):
|
||||
for key, value in six.iteritems(actual):
|
||||
if key in ('content-length', 'x-account-bytes-used',
|
||||
'x-account-container-count', 'x-account-object-count',
|
||||
'x-container-bytes-used', 'x-container-object-count')\
|
||||
and not value.isdigit():
|
||||
return InvalidFormat(key, value)
|
||||
elif key in ('content-type', 'date', 'last-modified',
|
||||
'x-copied-from-last-modified') and not value:
|
||||
return InvalidFormat(key, value)
|
||||
elif key == 'x-timestamp' and not re.match("^\d+\.?\d*\Z", value):
|
||||
return InvalidFormat(key, value)
|
||||
elif key == 'x-copied-from' and not re.match("\S+/\S+", value):
|
||||
return InvalidFormat(key, value)
|
||||
elif key == 'x-trans-id' and \
|
||||
not re.match("^tx[0-9a-f]{21}-[0-9a-f]{10}.*", value):
|
||||
return InvalidFormat(key, value)
|
||||
elif key == 'accept-ranges' and not value == 'bytes':
|
||||
return InvalidFormat(key, value)
|
||||
elif key == 'etag' and not value.isalnum():
|
||||
return InvalidFormat(key, value)
|
||||
elif key == 'transfer-encoding' and not value == 'chunked':
|
||||
return InvalidFormat(key, value)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class InvalidFormat(object):
|
||||
"""
|
||||
Informs an error message for end users if a format of a certain header
|
||||
is invalid
|
||||
"""
|
||||
|
||||
def __init__(self, key, value):
|
||||
self.key = key
|
||||
self.value = value
|
||||
|
||||
def describe(self):
|
||||
return "InvalidFormat (%s, %s)" % (self.key, self.value)
|
||||
|
||||
def get_details(self):
|
||||
return {}
|
||||
|
||||
|
||||
class MatchesDictExceptForKeys(object):
|
||||
"""Matches two dictionaries. Verifies all items are equals except for those
|
||||
identified by a list of keys.
|
||||
"""
|
||||
|
||||
def __init__(self, expected, excluded_keys=None):
|
||||
self.expected = expected
|
||||
self.excluded_keys = excluded_keys if excluded_keys is not None else []
|
||||
|
||||
def match(self, actual):
|
||||
filtered_expected = helpers.dict_subtract(self.expected,
|
||||
self.excluded_keys)
|
||||
filtered_actual = helpers.dict_subtract(actual,
|
||||
self.excluded_keys)
|
||||
if filtered_actual != filtered_expected:
|
||||
return DictMismatch(filtered_expected, filtered_actual)
|
||||
|
||||
|
||||
class DictMismatch(object):
|
||||
"""Mismatch between two dicts describes deltas"""
|
||||
|
||||
def __init__(self, expected, actual):
|
||||
self.expected = expected
|
||||
self.actual = actual
|
||||
self.intersect = set(self.expected) & set(self.actual)
|
||||
self.symmetric_diff = set(self.expected) ^ set(self.actual)
|
||||
|
||||
def _format_dict(self, dict_to_format):
|
||||
# Ensure the error string dict is printed in a set order
|
||||
# NOTE(mtreinish): needed to ensure a deterministic error msg for
|
||||
# testing. Otherwise the error message will be dependent on the
|
||||
# dict ordering.
|
||||
dict_string = "{"
|
||||
for key in sorted(dict_to_format):
|
||||
dict_string += "'%s': %s, " % (key, dict_to_format[key])
|
||||
dict_string = dict_string[:-2] + '}'
|
||||
return dict_string
|
||||
|
||||
def describe(self):
|
||||
msg = ""
|
||||
if self.symmetric_diff:
|
||||
only_expected = helpers.dict_subtract(self.expected, self.actual)
|
||||
only_actual = helpers.dict_subtract(self.actual, self.expected)
|
||||
if only_expected:
|
||||
msg += "Only in expected:\n %s\n" % self._format_dict(
|
||||
only_expected)
|
||||
if only_actual:
|
||||
msg += "Only in actual:\n %s\n" % self._format_dict(
|
||||
only_actual)
|
||||
diff_set = set(o for o in self.intersect if
|
||||
self.expected[o] != self.actual[o])
|
||||
if diff_set:
|
||||
msg += "Differences:\n"
|
||||
for o in diff_set:
|
||||
msg += " %s: expected %s, actual %s\n" % (
|
||||
o, self.expected[o], self.actual[o])
|
||||
return msg
|
||||
|
||||
def get_details(self):
|
||||
return {}
|
|
@ -0,0 +1,140 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest_lib.common.utils import misc as misc_utils
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_network_from_name(name, compute_networks_client):
|
||||
"""Get a full network dict from just a network name
|
||||
|
||||
:param str name: the name of the network to use
|
||||
:param NetworksClientJSON compute_networks_client: The network client
|
||||
object to use for making the network lists api request
|
||||
:return: The full dictionary for the network in question
|
||||
:rtype: dict
|
||||
:raises InvalidConfiguration: If the name provided is invalid, the networks
|
||||
list returns a 404, there are no found networks, or the found network
|
||||
is invalid
|
||||
"""
|
||||
caller = misc_utils.find_test_caller()
|
||||
|
||||
if not name:
|
||||
raise exceptions.InvalidConfiguration()
|
||||
|
||||
try:
|
||||
networks = compute_networks_client.list_networks(name=name)
|
||||
except lib_exc.NotFound:
|
||||
# In case of nova network, if the fixed_network_name is not
|
||||
# owned by the tenant, and the network client is not an admin
|
||||
# one, list_networks will not find it
|
||||
msg = ('Unable to find network %s. '
|
||||
'Starting instance without specifying a network.' %
|
||||
name)
|
||||
if caller:
|
||||
msg = '(%s) %s' % (caller, msg)
|
||||
LOG.info(msg)
|
||||
raise exceptions.InvalidConfiguration()
|
||||
|
||||
# Check that a network exists, else raise an InvalidConfigurationException
|
||||
if len(networks) == 1:
|
||||
network = sorted(networks)[0]
|
||||
elif len(networks) > 1:
|
||||
msg = ("Network with name: %s had multiple matching networks in the "
|
||||
"list response: %s\n Unable to specify a single network" % (
|
||||
name, networks))
|
||||
if caller:
|
||||
msg = '(%s) %s' % (caller, msg)
|
||||
LOG.warn(msg)
|
||||
raise exceptions.InvalidConfiguration()
|
||||
else:
|
||||
msg = "Network with name: %s not found" % name
|
||||
if caller:
|
||||
msg = '(%s) %s' % (caller, msg)
|
||||
LOG.warn(msg)
|
||||
raise exceptions.InvalidConfiguration()
|
||||
# To be consistent between neutron and nova network always use name even
|
||||
# if label is used in the api response. If neither is present than then
|
||||
# the returned network is invalid.
|
||||
name = network.get('name') or network.get('label')
|
||||
if not name:
|
||||
msg = "Network found from list doesn't contain a valid name or label"
|
||||
if caller:
|
||||
msg = '(%s) %s' % (caller, msg)
|
||||
LOG.warn(msg)
|
||||
raise exceptions.InvalidConfiguration()
|
||||
network['name'] = name
|
||||
return network
|
||||
|
||||
|
||||
def get_tenant_network(creds_provider, compute_networks_client):
|
||||
"""Get a network usable by the primary tenant
|
||||
|
||||
:param creds_provider: instance of credential provider
|
||||
:param compute_networks_client: compute network client. We want to have the
|
||||
compute network client so we can have use a common approach for both
|
||||
neutron and nova-network cases. If this is not an admin network
|
||||
client, set_network_kwargs might fail in case fixed_network_name
|
||||
is the network to be used, and it's not visible to the tenant
|
||||
:return a dict with 'id' and 'name' of the network
|
||||
"""
|
||||
caller = misc_utils.find_test_caller()
|
||||
fixed_network_name = CONF.compute.fixed_network_name
|
||||
net_creds = creds_provider.get_primary_creds()
|
||||
network = getattr(net_creds, 'network', None)
|
||||
if not network or not network.get('name'):
|
||||
if fixed_network_name:
|
||||
msg = ('No valid network provided or created, defaulting to '
|
||||
'fixed_network_name')
|
||||
if caller:
|
||||
msg = '(%s) %s' % (caller, msg)
|
||||
LOG.debug(msg)
|
||||
try:
|
||||
network = get_network_from_name(fixed_network_name,
|
||||
compute_networks_client)
|
||||
except exceptions.InvalidConfiguration:
|
||||
network = {}
|
||||
msg = ('Found network %s available for tenant' % network)
|
||||
if caller:
|
||||
msg = '(%s) %s' % (caller, msg)
|
||||
LOG.info(msg)
|
||||
return network
|
||||
|
||||
|
||||
def set_networks_kwarg(network, kwargs=None):
|
||||
"""Set 'networks' kwargs for a server create if missing
|
||||
|
||||
:param network: dict of network to be used with 'id' and 'name'
|
||||
:param kwargs: server create kwargs to be enhanced
|
||||
:return: new dict of kwargs updated to include networks
|
||||
"""
|
||||
params = copy.copy(kwargs) or {}
|
||||
if kwargs and 'networks' in kwargs:
|
||||
return params
|
||||
|
||||
if network:
|
||||
if 'id' in network.keys():
|
||||
params.update({"networks": [{'uuid': network['id']}]})
|
||||
else:
|
||||
LOG.warn('The provided network dict: %s was invalid and did not '
|
||||
' contain an id' % network)
|
||||
return params
|
|
@ -0,0 +1,182 @@
|
|||
# Copyright 2014 Deutsche Telekom AG
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
import functools
|
||||
|
||||
import jsonschema
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _check_for_expected_result(name, schema):
|
||||
expected_result = None
|
||||
if "results" in schema:
|
||||
if name in schema["results"]:
|
||||
expected_result = schema["results"][name]
|
||||
return expected_result
|
||||
|
||||
|
||||
def generator_type(*args, **kwargs):
|
||||
def wrapper(func):
|
||||
func.types = args
|
||||
for key in kwargs:
|
||||
setattr(func, key, kwargs[key])
|
||||
return func
|
||||
return wrapper
|
||||
|
||||
|
||||
def simple_generator(fn):
|
||||
"""
|
||||
Decorator for simple generators that return one value
|
||||
"""
|
||||
@functools.wraps(fn)
|
||||
def wrapped(self, schema):
|
||||
result = fn(self, schema)
|
||||
if result is not None:
|
||||
expected_result = _check_for_expected_result(fn.__name__, schema)
|
||||
return (fn.__name__, result, expected_result)
|
||||
return
|
||||
return wrapped
|
||||
|
||||
|
||||
class BasicGeneratorSet(object):
|
||||
_instance = None
|
||||
|
||||
schema = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {"type": "string"},
|
||||
"http-method": {
|
||||
"enum": ["GET", "PUT", "HEAD",
|
||||
"POST", "PATCH", "DELETE", 'COPY']
|
||||
},
|
||||
"admin_client": {"type": "boolean"},
|
||||
"url": {"type": "string"},
|
||||
"default_result_code": {"type": "integer"},
|
||||
"json-schema": {},
|
||||
"resources": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {"type": "string"},
|
||||
"expected_result": {"type": "integer"}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"results": {
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
}
|
||||
},
|
||||
"required": ["name", "http-method", "url"],
|
||||
"additionalProperties": False,
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
self.types_dict = {}
|
||||
for m in dir(self):
|
||||
if callable(getattr(self, m)) and not'__' in m:
|
||||
method = getattr(self, m)
|
||||
if hasattr(method, "types"):
|
||||
for type in method.types:
|
||||
if type not in self.types_dict:
|
||||
self.types_dict[type] = []
|
||||
self.types_dict[type].append(method)
|
||||
|
||||
def validate_schema(self, schema):
|
||||
if "json-schema" in schema:
|
||||
jsonschema.Draft4Validator.check_schema(schema['json-schema'])
|
||||
jsonschema.validate(schema, self.schema)
|
||||
|
||||
def generate_scenarios(self, schema, path=None):
|
||||
"""
|
||||
Generates the scenario (all possible test cases) out of the given
|
||||
schema.
|
||||
|
||||
:param schema: a dict style schema (see ``BasicGeneratorSet.schema``)
|
||||
:param path: the schema path if the given schema is a subschema
|
||||
"""
|
||||
schema_type = schema['type']
|
||||
scenarios = []
|
||||
|
||||
if schema_type == 'object':
|
||||
properties = schema["properties"]
|
||||
for attribute, definition in properties.iteritems():
|
||||
current_path = copy.copy(path)
|
||||
if path is not None:
|
||||
current_path.append(attribute)
|
||||
else:
|
||||
current_path = [attribute]
|
||||
scenarios.extend(
|
||||
self.generate_scenarios(definition, current_path))
|
||||
elif isinstance(schema_type, list):
|
||||
if "integer" in schema_type:
|
||||
schema_type = "integer"
|
||||
else:
|
||||
raise Exception("non-integer list types not supported")
|
||||
for generator in self.types_dict[schema_type]:
|
||||
if hasattr(generator, "needed_property"):
|
||||
prop = generator.needed_property
|
||||
if (prop not in schema or
|
||||
schema[prop] is None or
|
||||
schema[prop] is False):
|
||||
continue
|
||||
|
||||
name = generator.__name__
|
||||
if ("exclude_tests" in schema and
|
||||
name in schema["exclude_tests"]):
|
||||
continue
|
||||
if path is not None:
|
||||
name = "%s_%s" % ("_".join(path), name)
|
||||
scenarios.append({
|
||||
"_negtest_name": name,
|
||||
"_negtest_generator": generator,
|
||||
"_negtest_schema": schema,
|
||||
"_negtest_path": path})
|
||||
return scenarios
|
||||
|
||||
def generate_payload(self, test, schema):
|
||||
"""
|
||||
Generates one jsonschema out of the given test. It's mandatory to use
|
||||
generate_scenarios before to register all needed variables to the test.
|
||||
|
||||
:param test: A test object (scenario) with all _negtest variables on it
|
||||
:param schema: schema for the test
|
||||
"""
|
||||
generator = test._negtest_generator
|
||||
ret = generator(test._negtest_schema)
|
||||
path = copy.copy(test._negtest_path)
|
||||
expected_result = None
|
||||
|
||||
if ret is not None:
|
||||
generator_result = generator(test._negtest_schema)
|
||||
invalid_snippet = generator_result[1]
|
||||
expected_result = generator_result[2]
|
||||
element = path.pop()
|
||||
if len(path) > 0:
|
||||
schema_snip = reduce(dict.get, path, schema)
|
||||
schema_snip[element] = invalid_snippet
|
||||
else:
|
||||
schema[element] = invalid_snippet
|
||||
return expected_result
|
|
@ -0,0 +1,79 @@
|
|||
# Copyright 2014 Deutsche Telekom AG
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
import neutron_lbaas.tests.tempest.lib.common.generator.base_generator as base
|
||||
import neutron_lbaas.tests.tempest.lib.common.generator.valid_generator as valid
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NegativeTestGenerator(base.BasicGeneratorSet):
|
||||
@base.generator_type("string")
|
||||
@base.simple_generator
|
||||
def gen_int(self, _):
|
||||
return 4
|
||||
|
||||
@base.generator_type("integer")
|
||||
@base.simple_generator
|
||||
def gen_string(self, _):
|
||||
return "XXXXXX"
|
||||
|
||||
@base.generator_type("integer", "string")
|
||||
def gen_none(self, schema):
|
||||
# Note(mkoderer): it's not using the decorator otherwise it'd be
|
||||
# filtered
|
||||
expected_result = base._check_for_expected_result('gen_none', schema)
|
||||
return ('gen_none', None, expected_result)
|
||||
|
||||
@base.generator_type("string")
|
||||
@base.simple_generator
|
||||
def gen_str_min_length(self, schema):
|
||||
min_length = schema.get("minLength", 0)
|
||||
if min_length > 0:
|
||||
return "x" * (min_length - 1)
|
||||
|
||||
@base.generator_type("string", needed_property="maxLength")
|
||||
@base.simple_generator
|
||||
def gen_str_max_length(self, schema):
|
||||
max_length = schema.get("maxLength", -1)
|
||||
return "x" * (max_length + 1)
|
||||
|
||||
@base.generator_type("integer", needed_property="minimum")
|
||||
@base.simple_generator
|
||||
def gen_int_min(self, schema):
|
||||
minimum = schema["minimum"]
|
||||
if "exclusiveMinimum" not in schema:
|
||||
minimum -= 1
|
||||
return minimum
|
||||
|
||||
@base.generator_type("integer", needed_property="maximum")
|
||||
@base.simple_generator
|
||||
def gen_int_max(self, schema):
|
||||
maximum = schema["maximum"]
|
||||
if "exclusiveMaximum" not in schema:
|
||||
maximum += 1
|
||||
return maximum
|
||||
|
||||
@base.generator_type("object", needed_property="additionalProperties")
|
||||
@base.simple_generator
|
||||
def gen_obj_add_attr(self, schema):
|
||||
valid_schema = valid.ValidTestGenerator().generate_valid(schema)
|
||||
new_valid = copy.deepcopy(valid_schema)
|
||||
new_valid["$$$$$$$$$$"] = "xxx"
|
||||
return new_valid
|
|
@ -0,0 +1,82 @@
|
|||
# Copyright 2014 Deutsche Telekom AG
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
import neutron_lbaas.tests.tempest.lib.common.generator.base_generator as base
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ValidTestGenerator(base.BasicGeneratorSet):
|
||||
@base.generator_type("string")
|
||||
@base.simple_generator
|
||||
def generate_valid_string(self, schema):
|
||||
size = schema.get("minLength", 1)
|
||||
# TODO(dkr mko): handle format and pattern
|
||||
return "x" * size
|
||||
|
||||
@base.generator_type("integer")
|
||||
@base.simple_generator
|
||||
def generate_valid_integer(self, schema):
|
||||
# TODO(dkr mko): handle multipleOf
|
||||
if "minimum" in schema:
|
||||
minimum = schema["minimum"]
|
||||
if "exclusiveMinimum" not in schema:
|
||||
return minimum
|
||||
else:
|
||||
return minimum + 1
|
||||
if "maximum" in schema:
|
||||
maximum = schema["maximum"]
|
||||
if "exclusiveMaximum" not in schema:
|
||||
return maximum
|
||||
else:
|
||||
return maximum - 1
|
||||
return 0
|
||||
|
||||
@base.generator_type("object")
|
||||
@base.simple_generator
|
||||
def generate_valid_object(self, schema):
|
||||
obj = {}
|
||||
for k, v in schema["properties"].iteritems():
|
||||
obj[k] = self.generate_valid(v)
|
||||
return obj
|
||||
|
||||
def generate(self, schema):
|
||||
schema_type = schema["type"]
|
||||
if isinstance(schema_type, list):
|
||||
if "integer" in schema_type:
|
||||
schema_type = "integer"
|
||||
else:
|
||||
raise Exception("non-integer list types not supported")
|
||||
result = []
|
||||
if schema_type not in self.types_dict:
|
||||
raise TypeError("generator (%s) doesn't support type: %s"
|
||||
% (self.__class__.__name__, schema_type))
|
||||
for generator in self.types_dict[schema_type]:
|
||||
ret = generator(schema)
|
||||
if ret is not None:
|
||||
if isinstance(ret, list):
|
||||
result.extend(ret)
|
||||
elif isinstance(ret, tuple):
|
||||
result.append(ret)
|
||||
else:
|
||||
raise Exception("generator (%s) returns invalid result: %s"
|
||||
% (generator, ret))
|
||||
return result
|
||||
|
||||
def generate_valid(self, schema):
|
||||
return self.generate(schema)[0][1]
|
|
@ -0,0 +1,376 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Originally copied from python-glanceclient
|
||||
|
||||
import copy
|
||||
import hashlib
|
||||
import json
|
||||
import posixpath
|
||||
import re
|
||||
import socket
|
||||
import struct
|
||||
|
||||
import OpenSSL
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
from six import moves
|
||||
from six.moves import http_client as httplib
|
||||
from six.moves.urllib import parse as urlparse
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions as exc
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
USER_AGENT = 'tempest'
|
||||
CHUNKSIZE = 1024 * 64 # 64kB
|
||||
TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
|
||||
|
||||
|
||||
class HTTPClient(object):
|
||||
|
||||
def __init__(self, auth_provider, filters, **kwargs):
|
||||
self.auth_provider = auth_provider
|
||||
self.filters = filters
|
||||
self.endpoint = auth_provider.base_url(filters)
|
||||
endpoint_parts = urlparse.urlparse(self.endpoint)
|
||||
self.endpoint_scheme = endpoint_parts.scheme
|
||||
self.endpoint_hostname = endpoint_parts.hostname
|
||||
self.endpoint_port = endpoint_parts.port
|
||||
self.endpoint_path = endpoint_parts.path
|
||||
|
||||
self.connection_class = self.get_connection_class(self.endpoint_scheme)
|
||||
self.connection_kwargs = self.get_connection_kwargs(
|
||||
self.endpoint_scheme, **kwargs)
|
||||
|
||||
@staticmethod
|
||||
def get_connection_class(scheme):
|
||||
if scheme == 'https':
|
||||
return VerifiedHTTPSConnection
|
||||
else:
|
||||
return httplib.HTTPConnection
|
||||
|
||||
@staticmethod
|
||||
def get_connection_kwargs(scheme, **kwargs):
|
||||
_kwargs = {'timeout': float(kwargs.get('timeout', 600))}
|
||||
|
||||
if scheme == 'https':
|
||||
_kwargs['ca_certs'] = kwargs.get('ca_certs', None)
|
||||
_kwargs['cert_file'] = kwargs.get('cert_file', None)
|
||||
_kwargs['key_file'] = kwargs.get('key_file', None)
|
||||
_kwargs['insecure'] = kwargs.get('insecure', False)
|
||||
_kwargs['ssl_compression'] = kwargs.get('ssl_compression', True)
|
||||
|
||||
return _kwargs
|
||||
|
||||
def get_connection(self):
|
||||
_class = self.connection_class
|
||||
try:
|
||||
return _class(self.endpoint_hostname, self.endpoint_port,
|
||||
**self.connection_kwargs)
|
||||
except httplib.InvalidURL:
|
||||
raise exc.EndpointNotFound
|
||||
|
||||
def _http_request(self, url, method, **kwargs):
|
||||
"""Send an http request with the specified characteristics.
|
||||
|
||||
Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
|
||||
as setting headers and error handling.
|
||||
"""
|
||||
# Copy the kwargs so we can reuse the original in case of redirects
|
||||
kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
|
||||
kwargs['headers'].setdefault('User-Agent', USER_AGENT)
|
||||
|
||||
self._log_request(method, url, kwargs['headers'])
|
||||
|
||||
conn = self.get_connection()
|
||||
|
||||
try:
|
||||
url_parts = urlparse.urlparse(url)
|
||||
conn_url = posixpath.normpath(url_parts.path)
|
||||
LOG.debug('Actual Path: {path}'.format(path=conn_url))
|
||||
if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
|
||||
conn.putrequest(method, conn_url)
|
||||
for header, value in kwargs['headers'].items():
|
||||
conn.putheader(header, value)
|
||||
conn.endheaders()
|
||||
chunk = kwargs['body'].read(CHUNKSIZE)
|
||||
# Chunk it, baby...
|
||||
while chunk:
|
||||
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
|
||||
chunk = kwargs['body'].read(CHUNKSIZE)
|
||||
conn.send('0\r\n\r\n')
|
||||
else:
|
||||
conn.request(method, conn_url, **kwargs)
|
||||
resp = conn.getresponse()
|
||||
except socket.gaierror as e:
|
||||
message = ("Error finding address for %(url)s: %(e)s" %
|
||||
{'url': url, 'e': e})
|
||||
raise exc.EndpointNotFound(message)
|
||||
except (socket.error, socket.timeout) as e:
|
||||
message = ("Error communicating with %(endpoint)s %(e)s" %
|
||||
{'endpoint': self.endpoint, 'e': e})
|
||||
raise exc.TimeoutException(message)
|
||||
|
||||
body_iter = ResponseBodyIterator(resp)
|
||||
# Read body into string if it isn't obviously image data
|
||||
if resp.getheader('content-type', None) != 'application/octet-stream':
|
||||
body_str = ''.join([body_chunk for body_chunk in body_iter])
|
||||
body_iter = six.StringIO(body_str)
|
||||
self._log_response(resp, None)
|
||||
else:
|
||||
self._log_response(resp, body_iter)
|
||||
|
||||
return resp, body_iter
|
||||
|
||||
def _log_request(self, method, url, headers):
|
||||
LOG.info('Request: ' + method + ' ' + url)
|
||||
if headers:
|
||||
headers_out = headers
|
||||
if 'X-Auth-Token' in headers and headers['X-Auth-Token']:
|
||||
token = headers['X-Auth-Token']
|
||||
if len(token) > 64 and TOKEN_CHARS_RE.match(token):
|
||||
headers_out = headers.copy()
|
||||
headers_out['X-Auth-Token'] = "<Token omitted>"
|
||||
LOG.info('Request Headers: ' + str(headers_out))
|
||||
|
||||
def _log_response(self, resp, body):
|
||||
status = str(resp.status)
|
||||
LOG.info("Response Status: " + status)
|
||||
if resp.getheaders():
|
||||
LOG.info('Response Headers: ' + str(resp.getheaders()))
|
||||
if body:
|
||||
str_body = str(body)
|
||||
length = len(body)
|
||||
LOG.info('Response Body: ' + str_body[:2048])
|
||||
if length >= 2048:
|
||||
self.LOG.debug("Large body (%d) md5 summary: %s", length,
|
||||
hashlib.md5(str_body).hexdigest())
|
||||
|
||||
def json_request(self, method, url, **kwargs):
|
||||
kwargs.setdefault('headers', {})
|
||||
kwargs['headers'].setdefault('Content-Type', 'application/json')
|
||||
if kwargs['headers']['Content-Type'] != 'application/json':
|
||||
msg = "Only application/json content-type is supported."
|
||||
raise lib_exc.InvalidContentType(msg)
|
||||
|
||||
if 'body' in kwargs:
|
||||
kwargs['body'] = json.dumps(kwargs['body'])
|
||||
|
||||
resp, body_iter = self._http_request(url, method, **kwargs)
|
||||
|
||||
if 'application/json' in resp.getheader('content-type', ''):
|
||||
body = ''.join([chunk for chunk in body_iter])
|
||||
try:
|
||||
body = json.loads(body)
|
||||
except ValueError:
|
||||
LOG.error('Could not decode response body as JSON')
|
||||
else:
|
||||
msg = "Only json/application content-type is supported."
|
||||
raise lib_exc.InvalidContentType(msg)
|
||||
|
||||
return resp, body
|
||||
|
||||
def raw_request(self, method, url, **kwargs):
|
||||
kwargs.setdefault('headers', {})
|
||||
kwargs['headers'].setdefault('Content-Type',
|
||||
'application/octet-stream')
|
||||
if 'body' in kwargs:
|
||||
if (hasattr(kwargs['body'], 'read')
|
||||
and method.lower() in ('post', 'put')):
|
||||
# We use 'Transfer-Encoding: chunked' because
|
||||
# body size may not always be known in advance.
|
||||
kwargs['headers']['Transfer-Encoding'] = 'chunked'
|
||||
|
||||
# Decorate the request with auth
|
||||
req_url, kwargs['headers'], kwargs['body'] = \
|
||||
self.auth_provider.auth_request(
|
||||
method=method, url=url, headers=kwargs['headers'],
|
||||
body=kwargs.get('body', None), filters=self.filters)
|
||||
return self._http_request(req_url, method, **kwargs)
|
||||
|
||||
|
||||
class OpenSSLConnectionDelegator(object):
|
||||
"""
|
||||
An OpenSSL.SSL.Connection delegator.
|
||||
|
||||
Supplies an additional 'makefile' method which httplib requires
|
||||
and is not present in OpenSSL.SSL.Connection.
|
||||
|
||||
Note: Since it is not possible to inherit from OpenSSL.SSL.Connection
|
||||
a delegator must be used.
|
||||
"""
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.connection = OpenSSL.SSL.Connection(*args, **kwargs)
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self.connection, name)
|
||||
|
||||
def makefile(self, *args, **kwargs):
|
||||
# Ensure the socket is closed when this file is closed
|
||||
kwargs['close'] = True
|
||||
return socket._fileobject(self.connection, *args, **kwargs)
|
||||
|
||||
|
||||
class VerifiedHTTPSConnection(httplib.HTTPSConnection):
|
||||
"""
|
||||
Extended HTTPSConnection which uses the OpenSSL library
|
||||
for enhanced SSL support.
|
||||
Note: Much of this functionality can eventually be replaced
|
||||
with native Python 3.3 code.
|
||||
"""
|
||||
def __init__(self, host, port=None, key_file=None, cert_file=None,
|
||||
ca_certs=None, timeout=None, insecure=False,
|
||||
ssl_compression=True):
|
||||
httplib.HTTPSConnection.__init__(self, host, port,
|
||||
key_file=key_file,
|
||||
cert_file=cert_file)
|
||||
self.key_file = key_file
|
||||
self.cert_file = cert_file
|
||||
self.timeout = timeout
|
||||
self.insecure = insecure
|
||||
self.ssl_compression = ssl_compression
|
||||
self.ca_certs = ca_certs
|
||||
self.setcontext()
|
||||
|
||||
@staticmethod
|
||||
def host_matches_cert(host, x509):
|
||||
"""
|
||||
Verify that the the x509 certificate we have received
|
||||
from 'host' correctly identifies the server we are
|
||||
connecting to, ie that the certificate's Common Name
|
||||
or a Subject Alternative Name matches 'host'.
|
||||
"""
|
||||
# First see if we can match the CN
|
||||
if x509.get_subject().commonName == host:
|
||||
return True
|
||||
|
||||
# Also try Subject Alternative Names for a match
|
||||
san_list = None
|
||||
for i in moves.xrange(x509.get_extension_count()):
|
||||
ext = x509.get_extension(i)
|
||||
if ext.get_short_name() == 'subjectAltName':
|
||||
san_list = str(ext)
|
||||
for san in ''.join(san_list.split()).split(','):
|
||||
if san == "DNS:%s" % host:
|
||||
return True
|
||||
|
||||
# Server certificate does not match host
|
||||
msg = ('Host "%s" does not match x509 certificate contents: '
|
||||
'CommonName "%s"' % (host, x509.get_subject().commonName))
|
||||
if san_list is not None:
|
||||
msg = msg + ', subjectAltName "%s"' % san_list
|
||||
raise exc.SSLCertificateError(msg)
|
||||
|
||||
def verify_callback(self, connection, x509, errnum,
|
||||
depth, preverify_ok):
|
||||
if x509.has_expired():
|
||||
msg = "SSL Certificate expired on '%s'" % x509.get_notAfter()
|
||||
raise exc.SSLCertificateError(msg)
|
||||
|
||||
if depth == 0 and preverify_ok is True:
|
||||
# We verify that the host matches against the last
|
||||
# certificate in the chain
|
||||
return self.host_matches_cert(self.host, x509)
|
||||
else:
|
||||
# Pass through OpenSSL's default result
|
||||
return preverify_ok
|
||||
|
||||
def setcontext(self):
|
||||
"""
|
||||
Set up the OpenSSL context.
|
||||
"""
|
||||
self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
|
||||
|
||||
if self.ssl_compression is False:
|
||||
self.context.set_options(0x20000) # SSL_OP_NO_COMPRESSION
|
||||
|
||||
if self.insecure is not True:
|
||||
self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
|
||||
self.verify_callback)
|
||||
else:
|
||||
self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
|
||||
self.verify_callback)
|
||||
|
||||
if self.cert_file:
|
||||
try:
|
||||
self.context.use_certificate_file(self.cert_file)
|
||||
except Exception as e:
|
||||
msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e)
|
||||
raise exc.SSLConfigurationError(msg)
|
||||
if self.key_file is None:
|
||||
# We support having key and cert in same file
|
||||
try:
|
||||
self.context.use_privatekey_file(self.cert_file)
|
||||
except Exception as e:
|
||||
msg = ('No key file specified and unable to load key '
|
||||
'from "%s" %s' % (self.cert_file, e))
|
||||
raise exc.SSLConfigurationError(msg)
|
||||
|
||||
if self.key_file:
|
||||
try:
|
||||
self.context.use_privatekey_file(self.key_file)
|
||||
except Exception as e:
|
||||
msg = 'Unable to load key from "%s" %s' % (self.key_file, e)
|
||||
raise exc.SSLConfigurationError(msg)
|
||||
|
||||
if self.ca_certs:
|
||||
try:
|
||||
self.context.load_verify_locations(self.ca_certs)
|
||||
except Exception as e:
|
||||
msg = 'Unable to load CA from "%s"' % (self.ca_certs, e)
|
||||
raise exc.SSLConfigurationError(msg)
|
||||
else:
|
||||
self.context.set_default_verify_paths()
|
||||
|
||||
def connect(self):
|
||||
"""
|
||||
Connect to an SSL port using the OpenSSL library and apply
|
||||
per-connection parameters.
|
||||
"""
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
if self.timeout is not None:
|
||||
# '0' microseconds
|
||||
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
|
||||
struct.pack('LL', self.timeout, 0))
|
||||
self.sock = OpenSSLConnectionDelegator(self.context, sock)
|
||||
self.sock.connect((self.host, self.port))
|
||||
|
||||
def close(self):
|
||||
if self.sock:
|
||||
# Remove the reference to the socket but don't close it yet.
|
||||
# Response close will close both socket and associated
|
||||
# file. Closing socket too soon will cause response
|
||||
# reads to fail with socket IO error 'Bad file descriptor'.
|
||||
self.sock = None
|
||||
httplib.HTTPSConnection.close(self)
|
||||
|
||||
|
||||
class ResponseBodyIterator(object):
|
||||
"""A class that acts as an iterator over an HTTP response."""
|
||||
|
||||
def __init__(self, resp):
|
||||
self.resp = resp
|
||||
|
||||
def __iter__(self):
|
||||
while True:
|
||||
yield self.next()
|
||||
|
||||
def next(self):
|
||||
chunk = self.resp.read(CHUNKSIZE)
|
||||
if chunk:
|
||||
return chunk
|
||||
else:
|
||||
raise StopIteration()
|
|
@ -0,0 +1,438 @@
|
|||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import netaddr
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
from tempest_lib.common.utils import data_utils
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import clients
|
||||
from neutron_lbaas.tests.tempest.lib.common import cred_provider
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v2.json import identity_client as v2_identity
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class CredsClient(object):
    """This class is a wrapper around the identity clients, to provide a
    single interface for managing credentials in both v2 and v3 cases.
    It's not bound to created credentials, only to a specific set of admin
    credentials used for generating credentials.
    """

    def __init__(self, identity_client):
        # The client implies version and credentials
        self.identity_client = identity_client
        self.credentials = self.identity_client.auth_provider.credentials

    def create_user(self, username, password, project, email):
        """Create a user in the given project and return its dict."""
        user = self.identity_client.create_user(
            username, password, project['id'], email)
        return user

    @abc.abstractmethod
    def create_project(self, name, description):
        # Implemented by V2CredsClient (tenants) / V3CredsClient (projects).
        pass

    def assign_user_role(self, user, project, role_name):
        """Assign the role named role_name to user on project.

        Raises lib_exc.NotFound if no role with that name exists; a
        Conflict from the identity service (role already assigned) is
        logged and swallowed.
        """
        try:
            roles = self._list_roles()
            # next() raises StopIteration when no role matches, which is
            # translated into NotFound below.
            role = next(r for r in roles if r['name'] == role_name)
        except StopIteration:
            msg = 'No "%s" role found' % role_name
            raise lib_exc.NotFound(msg)
        try:
            self.identity_client.assign_user_role(project['id'], user['id'],
                                                  role['id'])
        except lib_exc.Conflict:
            LOG.debug("Role %s already assigned on project %s for user %s" % (
                role['id'], project['id'], user['id']))

    @abc.abstractmethod
    def get_credentials(self, user, project, password):
        # Implemented per identity version; returns a credentials object.
        pass

    def delete_user(self, user_id):
        """Delete the user identified by user_id."""
        self.identity_client.delete_user(user_id)

    def _list_roles(self):
        # Thin wrapper so subclasses/tests can override role listing.
        roles = self.identity_client.list_roles()
        return roles
|
||||
|
||||
|
||||
class V2CredsClient(CredsClient):
    """Keystone v2 credentials client: projects are v2 'tenants'."""

    def create_project(self, name, description):
        """Create a v2 tenant and return it."""
        return self.identity_client.create_tenant(
            name=name, description=description)

    def get_credentials(self, user, project, password):
        """Build a v2 credentials object for user in project."""
        return cred_provider.get_credentials(
            identity_version='v2',
            username=user['name'], user_id=user['id'],
            tenant_name=project['name'], tenant_id=project['id'],
            password=password)

    def delete_project(self, project_id):
        """Delete the v2 tenant identified by project_id."""
        self.identity_client.delete_tenant(project_id)
|
||||
|
||||
|
||||
class V3CredsClient(CredsClient):
    """Keystone v3 credentials client: projects live inside a domain."""

    def __init__(self, identity_client, domain_name):
        super(V3CredsClient, self).__init__(identity_client)
        try:
            # Domain names must be unique, so the lookup yields a
            # single-element list; keep that element.
            matches = self.identity_client.list_domains(
                params={'name': domain_name})
            self.creds_domain = matches[0]
        except lib_exc.NotFound:
            # TODO(andrea) we could probably create the domain on the fly
            msg = "Configured domain %s could not be found" % domain_name
            raise exceptions.InvalidConfiguration(msg)

    def create_project(self, name, description):
        """Create a project in the configured credentials domain."""
        return self.identity_client.create_project(
            name=name, description=description,
            domain_id=self.creds_domain['id'])

    def get_credentials(self, user, project, password):
        """Build a v3 credentials object for user in project."""
        return cred_provider.get_credentials(
            identity_version='v3',
            username=user['name'], user_id=user['id'],
            project_name=project['name'], project_id=project['id'],
            password=password,
            project_domain_name=self.creds_domain['name'])

    def delete_project(self, project_id):
        """Delete the v3 project identified by project_id."""
        self.identity_client.delete_project(project_id)
|
||||
|
||||
|
||||
def get_creds_client(identity_client, project_domain_name=None):
    """Return the credentials client matching identity_client's version.

    A v2 identity client gets a V2CredsClient; anything else is assumed
    to be v3 and gets a V3CredsClient bound to project_domain_name.
    """
    uses_v2 = isinstance(identity_client, v2_identity.IdentityClientJSON)
    if uses_v2:
        return V2CredsClient(identity_client)
    return V3CredsClient(identity_client, project_domain_name)
|
||||
|
||||
|
||||
class IsolatedCreds(cred_provider.CredentialProvider):
    """Credential provider that creates throwaway accounts on demand.

    For each requested credential type it provisions a random project and
    user via the admin identity client (plus, when neutron is available,
    an isolated network/subnet/router), caches the result in
    ``self.isolated_creds``, and tears everything down again in
    clear_isolated_creds().
    """

    def __init__(self, identity_version=None, name=None, password='pass',
                 network_resources=None):
        super(IsolatedCreds, self).__init__(identity_version, name, password,
                                            network_resources)
        self.network_resources = network_resources
        # Cache of credential-type (as str) -> TestResources.
        self.isolated_creds = {}
        self.ports = []
        self.password = password
        self.default_admin_creds = cred_provider.get_configured_credentials(
            'identity_admin', fill_in=True,
            identity_version=self.identity_version)
        self.identity_admin_client, self.network_admin_client = (
            self._get_admin_clients())
        # Domain where isolated credentials are provisioned (v3 only).
        # Use that of the admin account if None is configured.
        self.creds_domain_name = None
        if self.identity_version == 'v3':
            self.creds_domain_name = (
                CONF.auth.tenant_isolation_domain_name or
                self.default_admin_creds.project_domain_name)
        self.creds_client = get_creds_client(
            self.identity_admin_client, self.creds_domain_name)

    def _get_admin_clients(self):
        """
        Returns a tuple with instances of the following admin clients (in this
        order):
            identity
            network
        """
        os = clients.Manager(self.default_admin_creds)
        if self.identity_version == 'v2':
            return os.identity_client, os.network_client
        else:
            return os.identity_v3_client, os.network_client

    def _create_creds(self, suffix="", admin=False, roles=None):
        """Create random credentials under the following schema.

        If the name contains a '.' is the full class path of something, and
        we don't really care. If it isn't, it's probably a meaningful name,
        so use it.

        For logging purposes, -user and -tenant are long and redundant,
        don't use them. The user# will be sufficient to figure it out.
        """
        if '.' in self.name:
            root = ""
        else:
            root = self.name

        project_name = data_utils.rand_name(root) + suffix
        project_desc = project_name + "-desc"
        project = self.creds_client.create_project(
            name=project_name, description=project_desc)

        username = data_utils.rand_name(root) + suffix
        email = data_utils.rand_name(root) + suffix + "@example.com"
        user = self.creds_client.create_user(
            username, self.password, project, email)
        if admin:
            self.creds_client.assign_user_role(user, project,
                                               CONF.identity.admin_role)
        # Add roles specified in config file
        for conf_role in CONF.auth.tempest_roles:
            self.creds_client.assign_user_role(user, project, conf_role)
        # Add roles requested by caller
        if roles:
            for role in roles:
                self.creds_client.assign_user_role(user, project, role)
        creds = self.creds_client.get_credentials(user, project, self.password)
        return cred_provider.TestResources(creds)

    def _create_network_resources(self, tenant_id):
        """Create network/subnet/router for tenant_id as configured.

        Returns (network, subnet, router); each may be None if the
        network_resources dict disabled it. On any failure the resources
        created so far are cleaned up before re-raising.
        """
        network = None
        subnet = None
        router = None
        # Sanity-check the requested resource combination: a router needs
        # a subnet and network, a subnet needs a network, DHCP needs a
        # subnet.
        if self.network_resources:
            if self.network_resources['router']:
                if (not self.network_resources['subnet'] or
                        not self.network_resources['network']):
                    raise exceptions.InvalidConfiguration(
                        'A router requires a subnet and network')
            elif self.network_resources['subnet']:
                if not self.network_resources['network']:
                    raise exceptions.InvalidConfiguration(
                        'A subnet requires a network')
            elif self.network_resources['dhcp']:
                raise exceptions.InvalidConfiguration('DHCP requires a subnet')

        # NOTE(review): this stores rand_name_root as an attribute ON the
        # data_utils module, shared by all provider instances — looks
        # fragile; confirm this mirrors upstream tempest intentionally.
        data_utils.rand_name_root = data_utils.rand_name(self.name)
        if not self.network_resources or self.network_resources['network']:
            network_name = data_utils.rand_name_root + "-network"
            network = self._create_network(network_name, tenant_id)
        try:
            if not self.network_resources or self.network_resources['subnet']:
                subnet_name = data_utils.rand_name_root + "-subnet"
                subnet = self._create_subnet(subnet_name, tenant_id,
                                             network['id'])
            if not self.network_resources or self.network_resources['router']:
                router_name = data_utils.rand_name_root + "-router"
                router = self._create_router(router_name, tenant_id)
                self._add_router_interface(router['id'], subnet['id'])
        except Exception:
            # Roll back whatever was created before propagating.
            if router:
                self._clear_isolated_router(router['id'], router['name'])
            if subnet:
                self._clear_isolated_subnet(subnet['id'], subnet['name'])
            if network:
                self._clear_isolated_network(network['id'], network['name'])
            raise
        return network, subnet, router

    def _create_network(self, name, tenant_id):
        """Create a network owned by tenant_id and return its dict."""
        resp_body = self.network_admin_client.create_network(
            name=name, tenant_id=tenant_id)
        return resp_body['network']

    def _create_subnet(self, subnet_name, tenant_id, network_id):
        """Create an IPv4 subnet, probing candidate CIDRs until one fits.

        Iterates the sub-CIDRs of CONF.network.tenant_network_cidr and
        retries on 'overlaps with another subnet' errors; raises if no
        candidate CIDR is free.
        """
        base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
        mask_bits = CONF.network.tenant_network_mask_bits
        for subnet_cidr in base_cidr.subnet(mask_bits):
            try:
                if self.network_resources:
                    resp_body = self.network_admin_client.\
                        create_subnet(
                            network_id=network_id, cidr=str(subnet_cidr),
                            name=subnet_name,
                            tenant_id=tenant_id,
                            enable_dhcp=self.network_resources['dhcp'],
                            ip_version=4)
                else:
                    resp_body = self.network_admin_client.\
                        create_subnet(network_id=network_id,
                                      cidr=str(subnet_cidr),
                                      name=subnet_name,
                                      tenant_id=tenant_id,
                                      ip_version=4)
                break
            except lib_exc.BadRequest as e:
                if 'overlaps with another subnet' not in str(e):
                    raise
        else:
            # for/else: loop exhausted without a successful break.
            message = 'Available CIDR for subnet creation could not be found'
            raise Exception(message)
        return resp_body['subnet']

    def _create_router(self, router_name, tenant_id):
        """Create a router gatewayed to the configured public network."""
        external_net_id = dict(
            network_id=CONF.network.public_network_id)
        resp_body = self.network_admin_client.create_router(
            router_name,
            external_gateway_info=external_net_id,
            tenant_id=tenant_id)
        return resp_body['router']

    def _add_router_interface(self, router_id, subnet_id):
        """Attach subnet_id to router_id."""
        self.network_admin_client.add_router_interface_with_subnet_id(
            router_id, subnet_id)

    def get_credentials(self, credential_type):
        """Return (creating and caching on first use) credentials.

        credential_type is 'primary', 'alt', 'admin', or a list of role
        names; the str() of it keys the cache. Network resources are
        created alongside when neutron is available.
        """
        if self.isolated_creds.get(str(credential_type)):
            credentials = self.isolated_creds[str(credential_type)]
        else:
            if credential_type in ['primary', 'alt', 'admin']:
                is_admin = (credential_type == 'admin')
                credentials = self._create_creds(admin=is_admin)
            else:
                credentials = self._create_creds(roles=credential_type)
            self.isolated_creds[str(credential_type)] = credentials
            # Maintained until tests are ported
            LOG.info("Acquired isolated creds:\n credentials: %s"
                     % credentials)
            if (CONF.service_available.neutron and
                    not CONF.baremetal.driver_enabled):
                network, subnet, router = self._create_network_resources(
                    credentials.tenant_id)
                credentials.set_resources(network=network, subnet=subnet,
                                          router=router)
                LOG.info("Created isolated network resources for : \n"
                         + " credentials: %s" % credentials)
        return credentials

    def get_primary_creds(self):
        """Credentials for the primary (non-admin) account."""
        return self.get_credentials('primary')

    def get_admin_creds(self):
        """Credentials with the configured admin role."""
        return self.get_credentials('admin')

    def get_alt_creds(self):
        """Credentials for the alternate (second non-admin) account."""
        return self.get_credentials('alt')

    def get_creds_by_roles(self, roles, force_new=False):
        """Credentials carrying exactly the given roles (deduplicated)."""
        roles = list(set(roles))
        # The roles list as a str will become the index as the dict key for
        # the created credentials set in the isolated_creds dict.
        exist_creds = self.isolated_creds.get(str(roles))
        # If force_new flag is True 2 cred sets with the same roles are needed
        # handle this by creating a separate index for old one to store it
        # separately for cleanup
        if exist_creds and force_new:
            new_index = str(roles) + '-' + str(len(self.isolated_creds))
            self.isolated_creds[new_index] = exist_creds
            del self.isolated_creds[str(roles)]
        return self.get_credentials(roles)

    def _clear_isolated_router(self, router_id, router_name):
        # Best-effort delete: a missing router is only logged.
        net_client = self.network_admin_client
        try:
            net_client.delete_router(router_id)
        except lib_exc.NotFound:
            LOG.warn('router with name: %s not found for delete' %
                     router_name)

    def _clear_isolated_subnet(self, subnet_id, subnet_name):
        # Best-effort delete: a missing subnet is only logged.
        net_client = self.network_admin_client
        try:
            net_client.delete_subnet(subnet_id)
        except lib_exc.NotFound:
            LOG.warn('subnet with name: %s not found for delete' %
                     subnet_name)

    def _clear_isolated_network(self, network_id, network_name):
        # Best-effort delete: a missing network is only logged.
        net_client = self.network_admin_client
        try:
            net_client.delete_network(network_id)
        except lib_exc.NotFound:
            LOG.warn('network with name: %s not found for delete' %
                     network_name)

    def _cleanup_default_secgroup(self, tenant):
        """Delete the tenant's auto-created 'default' security group(s)."""
        net_client = self.network_admin_client
        resp_body = net_client.list_security_groups(tenant_id=tenant,
                                                    name="default")
        secgroups_to_delete = resp_body['security_groups']
        for secgroup in secgroups_to_delete:
            try:
                net_client.delete_security_group(secgroup['id'])
            except lib_exc.NotFound:
                LOG.warn('Security group %s, id %s not found for clean-up' %
                         (secgroup['name'], secgroup['id']))

    def _clear_isolated_net_resources(self):
        """Tear down every cached credential set's network resources."""
        net_client = self.network_admin_client
        for cred in self.isolated_creds:
            creds = self.isolated_creds.get(cred)
            if (not creds or not any([creds.router, creds.network,
                                      creds.subnet])):
                continue
            LOG.debug("Clearing network: %(network)s, "
                      "subnet: %(subnet)s, router: %(router)s",
                      {'network': creds.network, 'subnet': creds.subnet,
                       'router': creds.router})
            if (not self.network_resources or
                    (self.network_resources.get('router') and creds.subnet)):
                try:
                    net_client.remove_router_interface_with_subnet_id(
                        creds.router['id'], creds.subnet['id'])
                except lib_exc.NotFound:
                    LOG.warn('router with name: %s not found for delete' %
                             creds.router['name'])
                self._clear_isolated_router(creds.router['id'],
                                            creds.router['name'])
            if (not self.network_resources or
                    self.network_resources.get('subnet')):
                self._clear_isolated_subnet(creds.subnet['id'],
                                            creds.subnet['name'])
            if (not self.network_resources or
                    self.network_resources.get('network')):
                self._clear_isolated_network(creds.network['id'],
                                             creds.network['name'])

    def clear_isolated_creds(self):
        """Delete every cached user/project and their network resources."""
        if not self.isolated_creds:
            return
        self._clear_isolated_net_resources()
        for creds in six.itervalues(self.isolated_creds):
            try:
                self.creds_client.delete_user(creds.user_id)
            except lib_exc.NotFound:
                LOG.warn("user with name: %s not found for delete" %
                         creds.username)
            try:
                if CONF.service_available.neutron:
                    self._cleanup_default_secgroup(creds.tenant_id)
                self.creds_client.delete_project(creds.tenant_id)
            except lib_exc.NotFound:
                LOG.warn("tenant with name: %s not found for delete" %
                         creds.tenant_name)
        self.isolated_creds = {}

    def is_multi_user(self):
        # This provider always creates distinct users per credential type.
        return True

    def is_multi_tenant(self):
        # This provider always creates distinct projects per credential type.
        return True

    def is_role_available(self, role):
        # Roles are created/assigned on demand, so any role is "available".
        return True
|
|
@ -0,0 +1,85 @@
|
|||
# (c) 2014 Deutsche Telekom AG
|
||||
# Copyright 2014 Red Hat, Inc.
|
||||
# Copyright 2014 NEC Corporation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class NegativeRestClient(service_client.ServiceClient):
    """
    Version of RestClient that does not raise exceptions.

    _error_checker is a no-op, so HTTP error statuses are returned to the
    caller instead of being raised — used by negative API tests.
    """
    def __init__(self, auth_provider, service,
                 build_interval=None, build_timeout=None,
                 disable_ssl_certificate_validation=None,
                 ca_certs=None, trace_requests=None):
        # Region/endpoint type are looked up from config by catalog type
        # instead of being passed in by the caller.
        region, endpoint_type = self._get_region_and_endpoint_type(service)
        super(NegativeRestClient, self).__init__(
            auth_provider,
            service,
            region,
            endpoint_type=endpoint_type,
            build_interval=build_interval,
            build_timeout=build_timeout,
            disable_ssl_certificate_validation=(
                disable_ssl_certificate_validation),
            ca_certs=ca_certs,
            trace_requests=trace_requests)

    def _get_region_and_endpoint_type(self, service):
        """
        Returns the region for a specific service

        Scans every config section for a matching catalog_type; falls
        back to CONF.identity.region when no region is configured.
        """
        service_region = None
        service_endpoint_type = None
        for cfgname in dir(CONF._config):
            # Find all config.FOO.catalog_type and assume FOO is a service.
            cfg = getattr(CONF, cfgname)
            catalog_type = getattr(cfg, 'catalog_type', None)
            if catalog_type == service:
                service_region = getattr(cfg, 'region', None)
                service_endpoint_type = getattr(cfg, 'endpoint_type', None)
        if not service_region:
            service_region = CONF.identity.region
        return service_region, service_endpoint_type

    def _error_checker(self, method, url,
                       headers, body, resp, resp_body):
        # Deliberately swallow HTTP errors: negative tests inspect the
        # raw response themselves.
        pass

    def send_request(self, method, url_template, resources, body=None):
        """Fill url_template with resources and dispatch by HTTP method.

        Raises AssertionError for an unsupported method name.
        """
        url = url_template % tuple(resources)
        if method == "GET":
            resp, body = self.get(url)
        elif method == "POST":
            resp, body = self.post(url, body)
        elif method == "PUT":
            resp, body = self.put(url, body)
        elif method == "PATCH":
            resp, body = self.patch(url, body)
        elif method == "HEAD":
            resp, body = self.head(url)
        elif method == "DELETE":
            resp, body = self.delete(url)
        elif method == "COPY":
            resp, body = self.copy(url)
        else:
            # Explicit raise instead of 'assert False': survives
            # python -O and tells the caller what went wrong.
            raise AssertionError("Unsupported HTTP method: %s" % method)

        return resp, body
|
|
@ -0,0 +1,85 @@
|
|||
# Copyright 2015 NEC Corporation. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib.common import rest_client
|
||||
|
||||
|
||||
class ServiceClient(rest_client.RestClient):
    """RestClient wrapper that forwards only explicitly supplied options.

    Options left at None are omitted so RestClient's own defaults apply.
    """

    def __init__(self, auth_provider, service, region,
                 endpoint_type=None, build_interval=None, build_timeout=None,
                 disable_ssl_certificate_validation=None, ca_certs=None,
                 trace_requests=None):

        # These three are always forwarded, even when None.
        params = {
            'disable_ssl_certificate_validation':
                disable_ssl_certificate_validation,
            'ca_certs': ca_certs,
            'trace_requests': trace_requests,
        }

        # These are forwarded only when the caller actually set them.
        optional = (('endpoint_type', endpoint_type),
                    ('build_interval', build_interval),
                    ('build_timeout', build_timeout))
        for key, value in optional:
            if value is not None:
                params[key] = value

        super(ServiceClient, self).__init__(auth_provider, service, region,
                                            **params)
|
||||
|
||||
|
||||
class ResponseBody(dict):
    """Class that wraps an http response and dict body into a single value.

    Callers that receive this object will normally use it as a dict but
    can extract the response if needed.
    """

    def __init__(self, response, body=None):
        self.response = response
        self.update(body if body else {})

    def __str__(self):
        rendered = super(ResponseBody, self).__str__()
        return "response: %s\nBody: %s" % (self.response, rendered)
|
||||
|
||||
|
||||
class ResponseBodyData(object):
    """Class that wraps an http response and string data into a single value.
    """

    def __init__(self, response, data):
        self.response = response
        self.data = data

    def __str__(self):
        summary = "response: %s\nBody: %s"
        return summary % (self.response, self.data)
|
||||
|
||||
|
||||
class ResponseBodyList(list):
    """Class that wraps an http response and list body into a single value.

    Callers that receive this object will normally use it as a list but
    can extract the response if needed.
    """

    def __init__(self, response, body=None):
        self.response = response
        self.extend(body if body else [])

    def __str__(self):
        rendered = super(ResponseBodyList, self).__str__()
        return "response: %s\nBody: %s" % (self.response, rendered)
|
|
@ -0,0 +1,152 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import cStringIO
|
||||
import select
|
||||
import socket
|
||||
import time
|
||||
import warnings
|
||||
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore")
|
||||
import paramiko
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Client(object):
    """SSH client built on paramiko with connect retries and timeouts.

    Used by tests to run commands on guest VMs / service hosts.
    """

    def __init__(self, host, username, password=None, timeout=300, pkey=None,
                 channel_timeout=10, look_for_keys=False, key_filename=None):
        self.host = host
        self.username = username
        self.password = password
        # Accept a private key as a PEM string and parse it into an RSAKey.
        # NOTE(review): cStringIO exists only on Python 2 — confirm this
        # module is py2-only before porting.
        if isinstance(pkey, six.string_types):
            pkey = paramiko.RSAKey.from_private_key(
                cStringIO.StringIO(str(pkey)))
        self.pkey = pkey
        self.look_for_keys = look_for_keys
        self.key_filename = key_filename
        # Overall deadline (seconds) for connect retries / command output.
        self.timeout = int(timeout)
        # Per-attempt connect timeout; also reused as the poll interval in
        # exec_command.
        self.channel_timeout = float(channel_timeout)
        self.buf_size = 1024

    def _get_ssh_connection(self, sleep=1.5, backoff=1):
        """Returns an ssh connection to the specified host."""
        bsleep = sleep
        ssh = paramiko.SSHClient()
        # Auto-accept unknown host keys (test environments).
        ssh.set_missing_host_key_policy(
            paramiko.AutoAddPolicy())
        _start_time = time.time()
        if self.pkey is not None:
            LOG.info("Creating ssh connection to '%s' as '%s'"
                     " with public key authentication",
                     self.host, self.username)
        else:
            # NOTE(review): this logs the password in clear text at INFO
            # level — acceptable for throwaway test creds only; confirm.
            LOG.info("Creating ssh connection to '%s' as '%s'"
                     " with password %s",
                     self.host, self.username, str(self.password))
        attempts = 0
        # Retry with linear backoff until connected or self.timeout elapses.
        while True:
            try:
                ssh.connect(self.host, username=self.username,
                            password=self.password,
                            look_for_keys=self.look_for_keys,
                            key_filename=self.key_filename,
                            timeout=self.channel_timeout, pkey=self.pkey)
                LOG.info("ssh connection to %s@%s successfuly created",
                         self.username, self.host)
                return ssh
            except (socket.error,
                    paramiko.SSHException) as e:
                if self._is_timed_out(_start_time):
                    LOG.exception("Failed to establish authenticated ssh"
                                  " connection to %s@%s after %d attempts",
                                  self.username, self.host, attempts)
                    raise exceptions.SSHTimeout(host=self.host,
                                                user=self.username,
                                                password=self.password)
                bsleep += backoff
                attempts += 1
                LOG.warning("Failed to establish authenticated ssh"
                            " connection to %s@%s (%s). Number attempts: %s."
                            " Retry after %d seconds.",
                            self.username, self.host, e, attempts, bsleep)
                time.sleep(bsleep)

    def _is_timed_out(self, start_time):
        # True once more than self.timeout seconds have passed since
        # start_time (rearranged form of: now - start_time > timeout).
        return (time.time() - self.timeout) > start_time

    def exec_command(self, cmd):
        """
        Execute the specified command on the server.

        Note that this method is reading whole command outputs to memory, thus
        shouldn't be used for large outputs.

        :returns: data read from standard output of the command.
        :raises: SSHExecCommandFailed if command returns nonzero
                 status. The exception contains command status stderr content.
        """
        ssh = self._get_ssh_connection()
        transport = ssh.get_transport()
        channel = transport.open_session()
        channel.fileno()  # Register event pipe
        channel.exec_command(cmd)
        # No stdin is sent to the command.
        channel.shutdown_write()
        out_data = []
        err_data = []
        poll = select.poll()
        poll.register(channel, select.POLLIN)
        start_time = time.time()

        # Drain stdout/stderr until the channel closes or we time out.
        while True:
            # NOTE(review): poll() takes milliseconds while channel_timeout
            # is otherwise used as seconds — confirm this short interval is
            # intended.
            ready = poll.poll(self.channel_timeout)
            if not any(ready):
                if not self._is_timed_out(start_time):
                    continue
                raise exceptions.TimeoutException(
                    "Command: '{0}' executed on host '{1}'.".format(
                        cmd, self.host))
            if not ready[0]:  # If there is nothing to read.
                continue
            out_chunk = err_chunk = None
            if channel.recv_ready():
                out_chunk = channel.recv(self.buf_size)
                out_data += out_chunk,
            if channel.recv_stderr_ready():
                err_chunk = channel.recv_stderr(self.buf_size)
                err_data += err_chunk,
            if channel.closed and not err_chunk and not out_chunk:
                break
        exit_status = channel.recv_exit_status()
        if 0 != exit_status:
            raise exceptions.SSHExecCommandFailed(
                command=cmd, exit_status=exit_status,
                strerror=''.join(err_data))
        return ''.join(out_data)

    def test_connection_auth(self):
        """Raises an exception when we can not connect to server via ssh."""
        connection = self._get_ssh_connection()
        connection.close()
|
|
@ -1,7 +1,4 @@
|
|||
#! /bin/sh
|
||||
|
||||
# Copyright (C) 2014 VA Linux Systems Japan K.K.
|
||||
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
|
||||
# Copyright 2013 IBM Corp.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
|
@ -16,16 +13,9 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# The purpose of this script is to avoid casual introduction of more
|
||||
# bash dependency. Please consider alternatives before commiting code
|
||||
# which uses bash specific features.
|
||||
from oslo_concurrency.fixture import lockutils
|
||||
|
||||
# Ignore comments, but include shebangs
|
||||
OBSERVED=$(grep -E '^([^#]|#!).*bash' tox.ini tools/* | wc -l)
|
||||
EXPECTED=5
|
||||
if [ ${EXPECTED} -ne ${OBSERVED} ]; then
|
||||
echo Unexpected number of bash usages are detected.
|
||||
echo Please read the comment in $0
|
||||
exit 1
|
||||
fi
|
||||
exit 0
|
||||
|
||||
class LockFixture(lockutils.LockFixture):
    """Inter-process lock fixture using the fixed 'tempest-' prefix."""
    def __init__(self, name):
        super(LockFixture, self).__init__(name, 'tempest-')
|
|
@ -0,0 +1,3 @@
|
|||
# Shell command prefixes for pinging a target; the trailing space lets
# callers append the address directly.
PING_IPV4_COMMAND = 'ping -c 3 '
PING_IPV6_COMMAND = 'ping6 -c 3 '
# Captures the packet-loss percentage from ping's summary line. Raw
# string: the plain literal's '\d'/'\%' are invalid string escapes and
# emit SyntaxWarning on modern Python; the value is byte-identical.
PING_PACKET_LOSS_REGEX = r'(\d{1,3})\.?\d*\% packet loss'
|
|
@ -0,0 +1,101 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import itertools
|
||||
import netaddr
|
||||
import random
|
||||
import uuid
|
||||
|
||||
|
||||
def rand_uuid():
    """Return a random UUID4 in its canonical 36-character string form."""
    generated = uuid.uuid4()
    return str(generated)
|
||||
|
||||
|
||||
def rand_uuid_hex():
    """Return a random UUID4 as a bare 32-character hex string."""
    generated = uuid.uuid4()
    return generated.hex
|
||||
|
||||
|
||||
def rand_name(name=''):
    """Return name suffixed with a random positive 31-bit integer.

    With an empty name, just the random number (as a string) is returned.
    """
    suffix = str(random.randint(1, 0x7fffffff))
    return name + '-' + suffix if name else suffix
|
||||
|
||||
|
||||
def rand_url():
    """Return a random URL of the form https://url-<int>.com."""
    suffix = str(random.randint(1, 0x7fffffff))
    return ''.join(['https://url-', suffix, '.com'])
|
||||
|
||||
|
||||
def rand_int_id(start=0, end=0x7fffffff):
    """Return a random integer in [start, end], both bounds inclusive."""
    return random.randrange(start, end + 1)
|
||||
|
||||
|
||||
def rand_mac_address():
    """Generate an Ethernet MAC address with a fixed fa:16:3e prefix."""
    # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
    #             bridge mac addresses don't change, but it appears to
    #             conflict with libvirt, so we use the next highest octet
    #             that has the unicast and locally administered bits set
    #             properly: 0xfa.
    #             Discussion: https://bugs.launchpad.net/nova/+bug/921838
    octets = [0xfa, 0x16, 0x3e]
    for _ in range(3):
        octets.append(random.randint(0x00, 0xff))
    return ':'.join('%02x' % octet for octet in octets)
|
||||
|
||||
|
||||
def parse_image_id(image_ref):
    """Return the image id (last '/'-separated component) of image_ref."""
    return image_ref.rsplit('/', 1)[-1]
|
||||
|
||||
|
||||
def arbitrary_string(size=4, base_text=None):
    """Return exactly `size` characters drawn by cycling over base_text.

    Falls back to 'test' when base_text is empty or None.
    """
    source = base_text or 'test'
    repeated = itertools.cycle(source)
    return ''.join(itertools.islice(repeated, size))
|
||||
|
||||
|
||||
def random_bytes(size=1024):
    """
    Return size randomly selected bytes as a string.
    """
    chunks = []
    for _ in range(size):
        chunks.append(chr(random.randint(0, 255)))
    return ''.join(chunks)
|
||||
|
||||
|
||||
def get_ipv6_addr_by_EUI64(cidr, mac):
    """Build an IPv6 address from ``cidr`` and ``mac`` via EUI-64 expansion.

    Raises TypeError for an IPv4 prefix or malformed prefix/mac input.
    """
    # An IPv4 prefix cannot host an EUI-64 interface identifier.
    if netaddr.valid_ipv4(cidr):
        msg = "Unable to generate IP address by EUI64 for IPv4 prefix"
        raise TypeError(msg)
    try:
        eui64_int = int(netaddr.EUI(mac).eui64())
        network = netaddr.IPNetwork(cidr)
        # '+' binds tighter than '^', so this flips the universal/local
        # bit (bit 57) of (prefix + eui64), per RFC 4291 modified EUI-64.
        return netaddr.IPAddress(network.first + eui64_int ^ (1 << 57))
    except (ValueError, netaddr.AddrFormatError):
        raise TypeError('Bad prefix or mac format for generating IPv6 '
                        'address by EUI-64: %(prefix)s, %(mac)s:'
                        % {'prefix': cidr, 'mac': mac})
    except TypeError:
        raise TypeError('Bad prefix type for generate IPv6 address by '
                        'EUI-64: %s' % cidr)
|
|
@ -0,0 +1,23 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
def have_effective_read_access(path):
    """Return True if ``path`` can be opened for reading, else False.

    Actually attempts the open (rather than using os.access, which checks
    the *real* uid/gid), so the answer reflects effective permissions.
    The ``with`` block guarantees the probe handle is closed even if
    anything raises after the open succeeds (the original leaked the
    handle on such a failure path).
    """
    try:
        with open(path, "rb"):
            return True
    except IOError:
        return False
|
|
@ -0,0 +1,87 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import inspect
|
||||
import re
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def singleton(cls):
    """Simple wrapper for classes that should only have a single instance.

    Returns a zero-argument factory that lazily constructs the instance
    on first call and returns the cached one thereafter.
    """
    _instances = {}

    def _get_instance():
        if cls not in _instances:
            _instances[cls] = cls()
        return _instances[cls]

    return _get_instance
|
||||
|
||||
|
||||
def find_test_caller():
    """Find the caller class and test name.

    Walks up the call stack looking for test_* methods or the various
    setUp / tearDown / cleanup hooks, and reports "ClassName:method"
    for the first one found (or 'main', or None if nothing matches).
    Deliberately kept as a single function: extracting helpers would add
    stack frames and change what f_back walks over.
    """
    found = None
    seen = []
    frame = inspect.currentframe()
    in_cleanup = False
    # Climb the ladder until we hit a recognizable method.
    while True:
        try:
            frame = frame.f_back
            fn_name = frame.f_code.co_name
            seen.append(fn_name)
            if re.search("^(test_|setUp|tearDown)", fn_name):
                klass = ""
                if 'self' in frame.f_locals:
                    klass = frame.f_locals['self'].__class__.__name__
                if 'cls' in frame.f_locals:
                    klass = frame.f_locals['cls'].__name__
                found = klass + ":" + fn_name
                break
            elif re.search("^_run_cleanup", fn_name):
                in_cleanup = True
            elif fn_name == 'main':
                found = 'main'
                break
            else:
                klass = ""
                if 'self' in frame.f_locals:
                    klass = frame.f_locals['self'].__class__.__name__
                if 'cls' in frame.f_locals:
                    klass = frame.f_locals['cls'].__name__

                # Cleanups are indicated deep in the stack; once we know we
                # are in one, the first class that is not a RunTest* wrapper
                # wins and we declare victory.
                if in_cleanup and klass:
                    if not re.search("^RunTest", klass):
                        found = klass + ":_run_cleanups"
                        break
        except Exception:
            break
    # prevents frame leaks
    del frame
    if found is None:
        LOG.debug("Sane call name not found in %s" % seen)
    return found
|
|
@ -0,0 +1,113 @@
|
|||
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from tempest_lib.common.utils import data_utils
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def create_ssh_security_group(os, add_rule=False):
    """Create a randomly named security group on ``os``.

    When ``add_rule`` is True, inbound tcp/22 (ssh) and all-ICMP rules
    are added so instances in the group can be reached and pinged.
    Returns the created security group body.
    """
    sg_client = os.security_groups_client
    group_name = data_utils.rand_name('securitygroup-')
    group_desc = data_utils.rand_name('description-')
    secgroup = \
        sg_client.create_security_group(group_name, group_desc)
    if add_rule:
        sg_client.create_security_group_rule(secgroup['id'], 'tcp', 22, 22)
        sg_client.create_security_group_rule(secgroup['id'], 'icmp', -1, -1)
    LOG.debug("SSH Validation resource security group with tcp and icmp "
              "rules %s created"
              % group_name)
    return secgroup
|
||||
|
||||
|
||||
def create_validation_resources(os, validation_resources=None):
    """Create and return the resources required to validate a VM.

    ``validation_resources`` is a dict of booleans keyed by 'keypair',
    'security_group', 'security_group_rules' and 'floating_ip'; only the
    requested resources are created.  Returns a dict of created bodies
    keyed the same way.
    """
    created = {}
    if validation_resources:
        if validation_resources['keypair']:
            kp_name = data_utils.rand_name('keypair')
            created['keypair'] = \
                os.keypairs_client.create_keypair(kp_name)
            LOG.debug("Validation resource key %s created" % kp_name)
        with_rules = False
        if validation_resources['security_group']:
            if validation_resources['security_group_rules']:
                with_rules = True
            created['security_group'] = \
                create_ssh_security_group(os, with_rules)
        if validation_resources['floating_ip']:
            fip_client = os.floating_ips_client
            created['floating_ip'] = \
                fip_client.create_floating_ip()
    return created
|
||||
|
||||
|
||||
def clear_validation_resources(os, validation_data=None):
    """Clean up the VM validation resources in ``validation_data``.

    Every deletion is attempted even if an earlier one fails; NotFound
    is tolerated (already gone), and the first other exception seen is
    re-raised after all cleanups have run.
    """
    first_exc = None
    if validation_data:
        if 'keypair' in validation_data:
            kp_client = os.keypairs_client
            kp_name = validation_data['keypair']['name']
            try:
                kp_client.delete_keypair(kp_name)
            except lib_exc.NotFound:
                LOG.warn("Keypair %s is not found when attempting to delete"
                         % kp_name)
            except Exception as exc:
                LOG.exception('Exception raised while deleting key %s'
                              % kp_name)
                if not first_exc:
                    first_exc = exc
        if 'security_group' in validation_data:
            sg_client = os.security_groups_client
            sg_id = validation_data['security_group']['id']
            try:
                sg_client.delete_security_group(sg_id)
                sg_client.wait_for_resource_deletion(sg_id)
            except lib_exc.NotFound:
                LOG.warn("Security group %s is not found when attempting to "
                         " delete" % sg_id)
            except lib_exc.Conflict as exc:
                LOG.exception('Conflict while deleting security '
                              'group %s VM might not be deleted ' % sg_id)
                if not first_exc:
                    first_exc = exc
            except Exception as exc:
                LOG.exception('Exception raised while deleting security '
                              'group %s ' % sg_id)
                if not first_exc:
                    first_exc = exc
        if 'floating_ip' in validation_data:
            fip_client = os.floating_ips_client
            fip_id = validation_data['floating_ip']['id']
            try:
                fip_client.delete_floating_ip(fip_id)
            except lib_exc.NotFound:
                LOG.warn('Floating ip %s not found while attempting to delete'
                         % fip_id)
            except Exception as exc:
                LOG.exception('Exception raised while deleting ip %s '
                              % fip_id)
                if not first_exc:
                    first_exc = exc
    if first_exc:
        raise first_exc
|
|
@ -0,0 +1,161 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import time
|
||||
|
||||
from oslo_log import log as logging
|
||||
from tempest_lib.common.utils import misc as misc_utils
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# NOTE(afazekas): This function needs to know a token and a subject.
|
||||
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
                           extra_timeout=0, raise_on_error=True):
    """Waits for a server to reach a given status."""

    def _task_state(server_body):
        return server_body.get('OS-EXT-STS:task_state', None)

    # NOTE(afazekas): UNKNOWN status possible on ERROR
    # or in a very early stage.
    server = client.get_server(server_id)
    prev_status = current_status = server['status']
    prev_task = current_task = _task_state(server)
    started_at = int(time.time())
    deadline = client.build_timeout + extra_timeout
    while True:
        # NOTE(afazekas): BUILD is only observed during the
        # UNKNOWN->ACTIVE transition.
        # TODO(afazekas): enumerate and validate the stable status set
        if status == 'BUILD' and current_status != 'UNKNOWN':
            return
        if current_status == status:
            if not ready_wait:
                return
            if status == 'BUILD':
                return
            # The instance is in "ready for action state" only when no
            # task is in progress.  Compared as a string because of the
            # XML responses.
            if str(current_task) == "None":
                # without state api extension 3 sec usually enough
                time.sleep(CONF.compute.ready_wait)
                return

        time.sleep(client.build_interval)
        server = client.get_server(server_id)
        current_status = server['status']
        current_task = _task_state(server)
        if (current_status != prev_status) or (current_task != prev_task):
            LOG.info('State transition "%s" ==> "%s" after %d second wait',
                     '/'.join((prev_status, str(prev_task))),
                     '/'.join((current_status, str(current_task))),
                     time.time() - started_at)
        if (current_status == 'ERROR') and raise_on_error:
            if 'fault' in server:
                raise exceptions.BuildErrorException(server['fault'],
                                                     server_id=server_id)
            else:
                raise exceptions.BuildErrorException(server_id=server_id)

        if int(time.time()) - started_at >= deadline:
            expected_task_state = 'None' if ready_wait else 'n/a'
            message = ('Server %(server_id)s failed to reach %(status)s '
                       'status and task state "%(expected_task_state)s" '
                       'within the required time (%(timeout)s s).' %
                       {'server_id': server_id,
                        'status': status,
                        'expected_task_state': expected_task_state,
                        'timeout': deadline})
            message += ' Current status: %s.' % current_status
            message += ' Current task state: %s.' % current_task
            caller = misc_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
        prev_status = current_status
        prev_task = current_task
|
||||
|
||||
|
||||
def wait_for_image_status(client, image_id, status):
    """Waits for an image to reach a given status.

    ``client`` must provide a get_image(image_id) method plus
    build_interval and build_timeout attributes.
    """
    image = client.get_image(image_id)
    started_at = int(time.time())

    while image['status'] != status:
        time.sleep(client.build_interval)
        image = client.get_image(image_id)
        current = image['status']
        if current == 'ERROR':
            raise exceptions.AddImageException(image_id=image_id)

        # Re-check to avoid a false negative where the image reached the
        # expected status right as the timeout expired.
        if current == status:
            return

        if int(time.time()) - started_at >= client.build_timeout:
            message = ('Image %(image_id)s failed to reach %(status)s state'
                       '(current state %(status_curr)s) '
                       'within the required time (%(timeout)s s).' %
                       {'image_id': image_id,
                        'status': status,
                        'status_curr': current,
                        'timeout': client.build_timeout})
            caller = misc_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
|
||||
|
||||
|
||||
def wait_for_bm_node_status(client, node_id, attr, status):
    """Waits for a baremetal node attribute to reach the given status.

    ``client`` must provide a show_node(node_uuid) method plus
    build_interval and build_timeout attributes.
    """
    _, node = client.show_node(node_id)
    started_at = int(time.time())

    while node[attr] != status:
        time.sleep(client.build_interval)
        _, node = client.show_node(node_id)
        current = node[attr]
        if current == status:
            return

        if int(time.time()) - started_at >= client.build_timeout:
            message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
                       'within the required time (%(timeout)s s).' %
                       {'node_id': node_id,
                        'attr': attr,
                        'status': status,
                        'timeout': client.build_timeout})
            message += ' Current state of %s: %s.' % (attr, current)
            caller = misc_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,183 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
|
||||
class TempestException(Exception):
    """Base Tempest Exception.

    To correctly use this class, inherit from it and define a 'message'
    attribute; that message is printf-formatted with the keyword
    arguments supplied to the constructor.
    """
    message = "An unknown exception occurred"

    def __init__(self, *args, **kwargs):
        super(TempestException, self).__init__()
        try:
            self._error_string = self.message % kwargs
        except Exception:
            # at least get the core message out if formatting blows up
            self._error_string = self.message
        if args:
            # Positional arguments are treated as extra detail text and
            # appended, one per line, to the formatted message.
            details = '\n'.join("%s" % arg for arg in args)
            self._error_string += "\nDetails: %s" % details

    def __str__(self):
        return self._error_string
|
||||
|
||||
|
||||
class RestClientException(TempestException,
                          testtools.TestCase.failureException):
    """Base for REST-client failures; doubles as a test failure exception."""
    pass


class InvalidConfiguration(TempestException):
    message = "Invalid Configuration"


class InvalidCredentials(TempestException):
    message = "Invalid Credentials"


class InvalidServiceTag(TempestException):
    message = "Invalid service tag"


class InvalidIdentityVersion(TempestException):
    # BUG FIX: the format placeholder was '%(identity_version)' without the
    # trailing 's' conversion, which raises ValueError during formatting and
    # silently drops the version from the rendered message.
    message = "Invalid version %(identity_version)s of the identity service"


class TimeoutException(TempestException):
    message = "Request timed out"


class BuildErrorException(TempestException):
    message = "Server %(server_id)s failed to build and is in ERROR status"


class ImageKilledException(TempestException):
    message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"


class AddImageException(TempestException):
    message = "Image %(image_id)s failed to become ACTIVE in the allotted time"


class EC2RegisterImageException(TempestException):
    message = ("Image %(image_id)s failed to become 'available' "
               "in the allotted time")


class VolumeBuildErrorException(TempestException):
    message = "Volume %(volume_id)s failed to build and is in ERROR status"


class SnapshotBuildErrorException(TempestException):
    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"


class VolumeBackupException(TempestException):
    message = "Volume backup %(backup_id)s failed and is in ERROR status"


class StackBuildErrorException(TempestException):
    message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
               "due to '%(stack_status_reason)s'")


class StackResourceBuildErrorException(TempestException):
    message = ("Resource %(resource_name)s in stack %(stack_identifier)s is "
               "in %(resource_status)s status due to "
               "'%(resource_status_reason)s'")


class AuthenticationFailure(TempestException):
    message = ("Authentication with user %(user)s and password "
               "%(password)s failed auth using tenant %(tenant)s.")


class EndpointNotFound(TempestException):
    message = "Endpoint not found"


class ImageFault(TempestException):
    message = "Got image fault"


class IdentityError(TempestException):
    message = "Got identity error"


class ServerUnreachable(TempestException):
    message = "The server is not reachable via the configured network"


class TearDownException(TempestException):
    message = "%(num)d cleanUp operation failed"


class RFCViolation(RestClientException):
    message = "RFC Violation"


class InvalidHttpSuccessCode(RestClientException):
    message = "The success code is different than the expected one"


class BadRequest(RestClientException):
    message = "Bad request"


class ResponseWithNonEmptyBody(RFCViolation):
    message = ("RFC Violation! Response with %(status)d HTTP Status Code "
               "MUST NOT have a body")


class ResponseWithEntity(RFCViolation):
    message = ("RFC Violation! Response with 205 HTTP Status Code "
               "MUST NOT have an entity")


class InvalidHTTPResponseHeader(RestClientException):
    message = "HTTP response header is invalid"


class InvalidStructure(TempestException):
    message = "Invalid structure of table with details"
|
||||
|
||||
|
||||
class CommandFailed(Exception):
    """Raised when a spawned command exits non-zero.

    Captures the exit code, the command line, and both output streams
    for inclusion in the rendered message.
    """

    def __init__(self, returncode, cmd, output, stderr):
        super(CommandFailed, self).__init__()
        self.returncode = returncode
        self.cmd = cmd
        self.stdout = output
        self.stderr = stderr

    def __str__(self):
        parts = (self.cmd, self.returncode, self.stdout, self.stderr)
        return ("Command '%s' returned non-zero exit status %d.\n"
                "stdout:\n%s\n"
                "stderr:\n%s" % parts)
|
|
@ -0,0 +1,81 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib import auth
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
from neutron_lbaas.tests.tempest.lib.common import cred_provider
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class Manager(object):

    """Base manager class.

    Manager objects are responsible for providing a configuration object
    and a client object for a test case to use in performing actions.
    """

    def __init__(self, credentials=None):
        """Initialize the manager, optionally overriding credentials.

        :param credentials: credentials to use instead of the configured
            standard username/password/tenant_name[/domain_name].
        """
        self.auth_version = CONF.identity.auth_version
        if credentials is None:
            self.credentials = cred_provider.get_configured_credentials('user')
        else:
            self.credentials = credentials
        # Check if passed or default credentials are valid
        if not self.credentials.is_valid():
            raise exceptions.InvalidCredentials()
        # Tenant isolation hands us a TestResources wrapper, while
        # Accounts and some tests hand us bare Credentials.
        if isinstance(credentials, cred_provider.TestResources):
            creds = self.credentials.credentials
        else:
            creds = self.credentials
        # Creates an auth provider for the credentials
        self.auth_provider = get_auth_provider(creds)
        # FIXME(andreaf) unused
        self.client_attr_names = []
|
||||
|
||||
|
||||
def get_auth_provider_class(credentials):
    """Map ``credentials`` to its (auth provider class, auth URL) pair.

    Keystone v3 credentials select the v3 provider/URI; anything else
    falls back to v2.
    """
    if isinstance(credentials, auth.KeystoneV3Credentials):
        return auth.KeystoneV3AuthProvider, CONF.identity.uri_v3
    return auth.KeystoneV2AuthProvider, CONF.identity.uri
|
||||
|
||||
|
||||
def get_auth_provider(credentials):
    """Build an auth provider for ``credentials`` with configured TLS opts.

    Raises InvalidCredentials when ``credentials`` is None.
    """
    ssl_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests
    }
    if credentials is None:
        raise exceptions.InvalidCredentials(
            'Credentials must be specified')
    provider_class, auth_url = get_auth_provider_class(
        credentials)
    return provider_class(credentials, auth_url, **ssl_params)
|
|
@ -0,0 +1,216 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from six.moves import configparser as ConfigParser
|
||||
import contextlib
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
import types
|
||||
import urlparse
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
|
||||
import boto
|
||||
import boto.ec2
|
||||
import boto.s3.connection
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class BotoClientBase(object):
    """Base for boto-backed clients driven by tempest configuration.

    Subclasses define connect_method() and ALLOWED_METHODS; attribute
    access for an allowed method lazily creates a proxy that opens a
    fresh connection per call.
    """

    ALLOWED_METHODS = set()

    def __init__(self, identity_client):
        self.identity_client = identity_client

        self.ca_cert = CONF.identity.ca_certificates_file
        self.connection_timeout = str(CONF.boto.http_socket_timeout)
        self.num_retries = str(CONF.boto.num_retries)
        self.build_timeout = CONF.boto.build_timeout

        self.connection_data = {}

    def _config_boto_timeout(self, timeout, retries):
        # boto reads these settings from its process-global config object.
        try:
            boto.config.add_section("Boto")
        except ConfigParser.DuplicateSectionError:
            pass
        boto.config.set("Boto", "http_socket_timeout", timeout)
        boto.config.set("Boto", "num_retries", retries)

    def _config_boto_ca_certificates_file(self, ca_cert):
        if ca_cert is None:
            return

        try:
            boto.config.add_section("Boto")
        except ConfigParser.DuplicateSectionError:
            pass
        boto.config.set("Boto", "ca_certificates_file", ca_cert)

    def __getattr__(self, name):
        """Automatically creates methods for the allowed methods set."""
        if name not in self.ALLOWED_METHODS:
            raise AttributeError(name)

        def func(self, *args, **kwargs):
            with contextlib.closing(self.get_connection()) as conn:
                return getattr(conn, name)(*args, **kwargs)

        func.__name__ = name
        # NOTE(review): the three-argument types.MethodType forms below are
        # Python 2 only — confirm before running this under Python 3.
        setattr(self, name, types.MethodType(func, self, self.__class__))
        setattr(self.__class__, name,
                types.MethodType(func, None, self.__class__))
        return getattr(self, name)

    def get_connection(self):
        self._config_boto_timeout(self.connection_timeout, self.num_retries)
        self._config_boto_ca_certificates_file(self.ca_cert)

        creds = {'aws_access_key_id': CONF.boto.aws_access,
                 'aws_secret_access_key': CONF.boto.aws_secret}
        if not all(creds.values()):
            # Config gave no static AWS keys; fall back to keystone-issued
            # EC2 credentials.
            creds = self.get_aws_credentials(self.identity_client)

        self.connection_data.update(creds)
        return self.connect_method(**self.connection_data)

    def get_aws_credentials(self, identity_client):
        """Obtain existing, or create new, AWS (EC2) credentials.

        :param identity_client: identity client with embedded credentials
        :return: EC2 credentials dict suitable for boto kwargs
        """
        ec2_cred_list = identity_client.list_user_ec2_credentials(
            identity_client.user_id)
        for cred in ec2_cred_list:
            if cred['tenant_id'] == identity_client.tenant_id:
                ec2_cred = cred
                break
        else:
            ec2_cred = identity_client.create_user_ec2_credentials(
                identity_client.user_id, identity_client.tenant_id)
        if not all((ec2_cred, ec2_cred['access'], ec2_cred['secret'])):
            raise lib_exc.NotFound("Unable to get access and secret keys")
        return {'aws_access_key_id': ec2_cred['access'],
                'aws_secret_access_key': ec2_cred['secret']}
|
||||
|
||||
|
||||
class APIClientEC2(BotoClientBase):
    """EC2 API client backed by boto, aimed at the configured endpoint."""

    def connect_method(self, *args, **kwargs):
        return boto.connect_ec2(*args, **kwargs)

    def __init__(self, identity_client):
        super(APIClientEC2, self).__init__(identity_client)
        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
        purl = urlparse.urlparse(CONF.boto.ec2_url)

        region_name = CONF.compute.region
        if not region_name:
            region_name = CONF.identity.region
        region = boto.ec2.regioninfo.RegionInfo(name=region_name,
                                                endpoint=purl.hostname)
        port = purl.port
        if port is None:
            # BUG FIX: was `purl.scheme is not "https"` — an identity
            # comparison against a string literal that only works by
            # CPython interning accident; use equality.
            if purl.scheme != "https":
                port = 80
            else:
                port = 443
        else:
            port = int(port)
        self.connection_data.update({"is_secure": purl.scheme == "https",
                                     "validate_certs": not insecure_ssl,
                                     "region": region,
                                     "host": purl.hostname,
                                     "port": port,
                                     "path": purl.path})

    # BUG FIX: a comma was missing between 'modify_volume_attribute' and
    # 'bundle_instance', so the two literals were silently concatenated
    # into one bogus name and both methods were dropped from the proxy
    # whitelist.
    # NOTE(review): 'confirm_product_instanc' looks like a typo of boto's
    # confirm_product_instance — left as-is pending confirmation.
    ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
                           'delete_key_pair', 'import_key_pair',
                           'get_all_key_pairs',
                           'get_all_tags',
                           'create_image', 'get_image',
                           'register_image', 'deregister_image',
                           'get_all_images', 'get_image_attribute',
                           'modify_image_attribute', 'reset_image_attribute',
                           'get_all_kernels',
                           'create_volume', 'delete_volume',
                           'get_all_volume_status', 'get_all_volumes',
                           'get_volume_attribute', 'modify_volume_attribute',
                           'bundle_instance', 'cancel_spot_instance_requests',
                           'confirm_product_instanc',
                           'get_all_instance_status', 'get_all_instances',
                           'get_all_reserved_instances',
                           'get_all_spot_instance_requests',
                           'get_instance_attribute', 'monitor_instance',
                           'monitor_instances', 'unmonitor_instance',
                           'unmonitor_instances',
                           'purchase_reserved_instance_offering',
                           'reboot_instances', 'request_spot_instances',
                           'reset_instance_attribute', 'run_instances',
                           'start_instances', 'stop_instances',
                           'terminate_instances',
                           'attach_network_interface', 'attach_volume',
                           'detach_network_interface', 'detach_volume',
                           'get_console_output',
                           'delete_network_interface', 'create_subnet',
                           'create_network_interface', 'delete_subnet',
                           'get_all_network_interfaces',
                           'allocate_address', 'associate_address',
                           'disassociate_address', 'get_all_addresses',
                           'release_address',
                           'create_snapshot', 'delete_snapshot',
                           'get_all_snapshots', 'get_snapshot_attribute',
                           'modify_snapshot_attribute',
                           'reset_snapshot_attribute', 'trim_snapshots',
                           'get_all_regions', 'get_all_zones',
                           'get_all_security_groups', 'create_security_group',
                           'delete_security_group', 'authorize_security_group',
                           'authorize_security_group_egress',
                           'revoke_security_group',
                           'revoke_security_group_egress'))
|
||||
|
||||
|
||||
class ObjectClientS3(BotoClientBase):
    """Boto-based S3 object-storage client.

    Connection parameters are derived from CONF.boto.s3_url; when the URL
    carries no explicit port, it defaults to 80 or 443 based on the scheme.
    """

    def connect_method(self, *args, **kwargs):
        # Delegate connection creation to boto's S3 connection factory.
        return boto.connect_s3(*args, **kwargs)

    def __init__(self, identity_client):
        super(ObjectClientS3, self).__init__(identity_client)
        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
        purl = urlparse.urlparse(CONF.boto.s3_url)
        port = purl.port
        if port is None:
            # Bug fix: the original used `purl.scheme is not "https"`, which
            # compares object identity rather than string equality and is
            # therefore unreliable (and warned against by CPython). Use `!=`.
            if purl.scheme != "https":
                port = 80
            else:
                port = 443
        else:
            port = int(port)
        self.connection_data.update({"is_secure": purl.scheme == "https",
                                     "validate_certs": not insecure_ssl,
                                     "host": purl.hostname,
                                     "port": port,
                                     "calling_format": boto.s3.connection.
                                     OrdinaryCallingFormat()})

    # Whitelist of boto S3 methods this client is allowed to proxy.
    ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
                           'get_all_buckets', 'get_bucket', 'delete_key',
                           'lookup'))
|
|
@ -0,0 +1,283 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class IdentityClientJSON(service_client.ServiceClient):
    """JSON client for the Keystone v2.0 identity API.

    Covers tenants, users, roles, tokens, services and EC2 credentials,
    including the OS-KSADM admin extension endpoints.
    """

    def has_admin_extensions(self):
        """
        Returns True if the KSADM Admin Extensions are supported
        False otherwise
        """
        # Result is cached on the instance after the first probe.
        if hasattr(self, '_has_admin_extensions'):
            return self._has_admin_extensions
        # Try something that requires admin
        try:
            self.list_roles()
            self._has_admin_extensions = True
        except Exception:
            self._has_admin_extensions = False
        return self._has_admin_extensions

    def create_role(self, name):
        """Create a role."""
        post_body = {
            'name': name,
        }
        post_body = json.dumps({'role': post_body})
        resp, body = self.post('OS-KSADM/roles', post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def get_role(self, role_id):
        """Get a role by its id."""
        resp, body = self.get('OS-KSADM/roles/%s' % role_id)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['role'])

    def create_tenant(self, name, **kwargs):
        """
        Create a tenant
        name (required): New tenant name
        description: Description of new tenant (default is none)
        enabled <true|false>: Initial tenant status (default is true)
        """
        post_body = {
            'name': name,
            'description': kwargs.get('description', ''),
            'enabled': kwargs.get('enabled', True),
        }
        post_body = json.dumps({'tenant': post_body})
        resp, body = self.post('tenants', post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def delete_role(self, role_id):
        """Delete a role."""
        resp, body = self.delete('OS-KSADM/roles/%s' % str(role_id))
        self.expected_success(204, resp.status)
        # NOTE(review): returns the raw (resp, body) tuple, unlike the other
        # delete_* methods which wrap the result in ResponseBody — confirm
        # callers before unifying.
        return resp, body

    def list_user_roles(self, tenant_id, user_id):
        """Returns a list of roles assigned to a user for a tenant."""
        url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def assign_user_role(self, tenant_id, user_id, role_id):
        """Add roles to a user on a tenant."""
        resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
                              (tenant_id, user_id, role_id), "")
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def remove_user_role(self, tenant_id, user_id, role_id):
        """Removes a role assignment for a user on a tenant."""
        resp, body = self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
                                 (tenant_id, user_id, role_id))
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def delete_tenant(self, tenant_id):
        """Delete a tenant."""
        resp, body = self.delete('tenants/%s' % str(tenant_id))
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def get_tenant(self, tenant_id):
        """Get tenant details."""
        resp, body = self.get('tenants/%s' % str(tenant_id))
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def list_roles(self):
        """Returns roles."""
        resp, body = self.get('OS-KSADM/roles')
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def list_tenants(self):
        """Returns tenants."""
        resp, body = self.get('tenants')
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp, body['tenants'])

    def get_tenant_by_name(self, tenant_name):
        # Linear scan over the tenant list; raises NotFound on no match.
        tenants = self.list_tenants()
        for tenant in tenants:
            if tenant['name'] == tenant_name:
                return tenant
        raise lib_exc.NotFound('No such tenant')

    def update_tenant(self, tenant_id, **kwargs):
        """Updates a tenant."""
        # Fetch current state first so unspecified fields keep their values.
        body = self.get_tenant(tenant_id)
        name = kwargs.get('name', body['name'])
        desc = kwargs.get('description', body['description'])
        en = kwargs.get('enabled', body['enabled'])
        post_body = {
            'id': tenant_id,
            'name': name,
            'description': desc,
            'enabled': en,
        }
        post_body = json.dumps({'tenant': post_body})
        resp, body = self.post('tenants/%s' % tenant_id, post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def create_user(self, name, password, tenant_id, email, **kwargs):
        """Create a user."""
        post_body = {
            'name': name,
            'password': password,
            'email': email
        }
        if tenant_id is not None:
            post_body['tenantId'] = tenant_id
        if kwargs.get('enabled') is not None:
            post_body['enabled'] = kwargs.get('enabled')
        post_body = json.dumps({'user': post_body})
        resp, body = self.post('users', post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def update_user(self, user_id, **kwargs):
        """Updates a user."""
        put_body = json.dumps({'user': kwargs})
        resp, body = self.put('users/%s' % user_id, put_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def get_user(self, user_id):
        """GET a user."""
        resp, body = self.get("users/%s" % user_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def delete_user(self, user_id):
        """Delete a user."""
        resp, body = self.delete("users/%s" % user_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def get_users(self):
        """Get the list of users."""
        resp, body = self.get("users")
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def enable_disable_user(self, user_id, enabled):
        """Enables or disables a user."""
        put_body = {
            'enabled': enabled
        }
        put_body = json.dumps({'user': put_body})
        resp, body = self.put('users/%s/enabled' % user_id, put_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def get_token(self, token_id):
        """Get token details."""
        resp, body = self.get("tokens/%s" % token_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def delete_token(self, token_id):
        """Delete a token."""
        resp, body = self.delete("tokens/%s" % token_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def list_users_for_tenant(self, tenant_id):
        """List users for a Tenant."""
        resp, body = self.get('/tenants/%s/users' % tenant_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def get_user_by_username(self, tenant_id, username):
        # Linear scan over the tenant's users; raises NotFound on no match.
        users = self.list_users_for_tenant(tenant_id)
        for user in users:
            if user['name'] == username:
                return user
        raise lib_exc.NotFound('No such user')

    def create_service(self, name, type, **kwargs):
        """Create a service."""
        post_body = {
            'name': name,
            'type': type,
            'description': kwargs.get('description')
        }
        post_body = json.dumps({'OS-KSADM:service': post_body})
        resp, body = self.post('/OS-KSADM/services', post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def get_service(self, service_id):
        """Get Service."""
        url = '/OS-KSADM/services/%s' % service_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def list_services(self):
        """List Service - Returns Services."""
        resp, body = self.get('/OS-KSADM/services')
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def delete_service(self, service_id):
        """Delete Service."""
        url = '/OS-KSADM/services/%s' % service_id
        resp, body = self.delete(url)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def update_user_password(self, user_id, new_pass):
        """Update User Password."""
        put_body = {
            'password': new_pass,
            'id': user_id
        }
        put_body = json.dumps({'user': put_body})
        resp, body = self.put('users/%s/OS-KSADM/password' % user_id, put_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def list_extensions(self):
        """List all the extensions."""
        resp, body = self.get('/extensions')
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp,
                                               body['extensions']['values'])

    def create_user_ec2_credentials(self, user_id, tenant_id):
        # Creates EC2-style credentials for a user scoped to a tenant.
        post_body = json.dumps({'tenant_id': tenant_id})
        resp, body = self.post('/users/%s/credentials/OS-EC2' % user_id,
                               post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def list_user_ec2_credentials(self, user_id):
        # Lists all EC2-style credentials held by a user.
        resp, body = self.get('/users/%s/credentials/OS-EC2' % user_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))
|
|
@ -0,0 +1,110 @@
|
|||
# Copyright 2015 NEC Corporation. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
from tempest_lib.common import rest_client
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
|
||||
|
||||
class TokenClientJSON(rest_client.RestClient):
    """Unauthenticated client for the Keystone v2.0 /tokens endpoint.

    Unlike ServiceClient subclasses, this talks directly to the auth URL
    (no service catalog lookup) and overrides request() to do its own
    status handling and JSON decoding.
    """

    def __init__(self, auth_url, disable_ssl_certificate_validation=None,
                 ca_certs=None, trace_requests=None):
        dscv = disable_ssl_certificate_validation
        # No auth provider / catalog: first three base args are None.
        super(TokenClientJSON, self).__init__(
            None, None, None, disable_ssl_certificate_validation=dscv,
            ca_certs=ca_certs, trace_requests=trace_requests)

        # Normalize URI to ensure /tokens is in it.
        if 'tokens' not in auth_url:
            auth_url = auth_url.rstrip('/') + '/tokens'

        self.auth_url = auth_url

    def auth(self, user, password, tenant=None):
        """Authenticate with username/password; returns the 'access' body."""
        creds = {
            'auth': {
                'passwordCredentials': {
                    'username': user,
                    'password': password,
                },
            }
        }

        # Tenant scoping is optional; omit it for an unscoped token.
        if tenant:
            creds['auth']['tenantName'] = tenant

        body = json.dumps(creds)
        resp, body = self.post(self.auth_url, body=body)
        self.expected_success(200, resp.status)

        return service_client.ResponseBody(resp, body['access'])

    def auth_token(self, token_id, tenant=None):
        """Exchange an existing token (optionally rescoped to a tenant)."""
        creds = {
            'auth': {
                'token': {
                    'id': token_id,
                },
            }
        }

        if tenant:
            creds['auth']['tenantName'] = tenant

        body = json.dumps(creds)
        resp, body = self.post(self.auth_url, body=body)
        self.expected_success(200, resp.status)

        return service_client.ResponseBody(resp, body['access'])

    def request(self, method, url, extra_headers=False, headers=None,
                body=None):
        """A simple HTTP request interface."""
        if headers is None:
            headers = self.get_headers(accept_type="json")
        elif extra_headers:
            # Merge default headers in; fall back to defaults alone if the
            # supplied headers object is not dict-like.
            try:
                headers.update(self.get_headers(accept_type="json"))
            except (ValueError, TypeError):
                headers = self.get_headers(accept_type="json")

        resp, resp_body = self.raw_request(url, method,
                                           headers=headers, body=body)
        self._log_request(method, url, resp)

        # Auth failures carry a JSON error payload; surface its message.
        if resp.status in [401, 403]:
            resp_body = json.loads(resp_body)
            raise lib_exc.Unauthorized(resp_body['error']['message'])
        elif resp.status not in [200, 201]:
            raise exceptions.IdentityError(
                'Unexpected status code {0}'.format(resp.status))

        # NOTE(review): on Python 2 a `unicode` body would not be decoded
        # here (only `str` is checked) — confirm raw_request's return type.
        if isinstance(resp_body, str):
            resp_body = json.loads(resp_body)
        return resp, resp_body

    def get_token(self, user, password, tenant, auth_data=False):
        """
        Returns (token id, token data) for supplied credentials
        """
        body = self.auth(user, password, tenant)

        if auth_data:
            return body['token']['id'], body
        else:
            return body['token']['id']
|
|
@ -0,0 +1,83 @@
|
|||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class CredentialsClientJSON(service_client.ServiceClient):
|
||||
api_version = "v3"
|
||||
|
||||
def create_credential(self, access_key, secret_key, user_id, project_id):
|
||||
"""Creates a credential."""
|
||||
blob = "{\"access\": \"%s\", \"secret\": \"%s\"}" % (
|
||||
access_key, secret_key)
|
||||
post_body = {
|
||||
"blob": blob,
|
||||
"project_id": project_id,
|
||||
"type": "ec2",
|
||||
"user_id": user_id
|
||||
}
|
||||
post_body = json.dumps({'credential': post_body})
|
||||
resp, body = self.post('credentials', post_body)
|
||||
self.expected_success(201, resp.status)
|
||||
body = json.loads(body)
|
||||
body['credential']['blob'] = json.loads(body['credential']['blob'])
|
||||
return service_client.ResponseBody(resp, body['credential'])
|
||||
|
||||
def update_credential(self, credential_id, **kwargs):
|
||||
"""Updates a credential."""
|
||||
body = self.get_credential(credential_id)
|
||||
cred_type = kwargs.get('type', body['type'])
|
||||
access_key = kwargs.get('access_key', body['blob']['access'])
|
||||
secret_key = kwargs.get('secret_key', body['blob']['secret'])
|
||||
project_id = kwargs.get('project_id', body['project_id'])
|
||||
user_id = kwargs.get('user_id', body['user_id'])
|
||||
blob = "{\"access\": \"%s\", \"secret\": \"%s\"}" % (
|
||||
access_key, secret_key)
|
||||
post_body = {
|
||||
"blob": blob,
|
||||
"project_id": project_id,
|
||||
"type": cred_type,
|
||||
"user_id": user_id
|
||||
}
|
||||
post_body = json.dumps({'credential': post_body})
|
||||
resp, body = self.patch('credentials/%s' % credential_id, post_body)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
body['credential']['blob'] = json.loads(body['credential']['blob'])
|
||||
return service_client.ResponseBody(resp, body['credential'])
|
||||
|
||||
def get_credential(self, credential_id):
|
||||
"""To GET Details of a credential."""
|
||||
resp, body = self.get('credentials/%s' % credential_id)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
body['credential']['blob'] = json.loads(body['credential']['blob'])
|
||||
return service_client.ResponseBody(resp, body['credential'])
|
||||
|
||||
def list_credentials(self):
|
||||
"""Lists out all the available credentials."""
|
||||
resp, body = self.get('credentials')
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['credentials'])
|
||||
|
||||
def delete_credential(self, credential_id):
|
||||
"""Deletes a credential."""
|
||||
resp, body = self.delete('credentials/%s' % credential_id)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
|
@ -0,0 +1,87 @@
|
|||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class EndPointClientJSON(service_client.ServiceClient):
    """JSON client for the Keystone v3 /endpoints API."""
    api_version = "v3"

    def list_endpoints(self):
        """GET endpoints."""
        resp, raw = self.get('endpoints')
        self.expected_success(200, resp.status)
        parsed = json.loads(raw)
        return service_client.ResponseBodyList(resp, parsed['endpoints'])

    def create_endpoint(self, service_id, interface, url, **kwargs):
        """Create endpoint.

        Normally this function wouldn't allow setting values that are not
        allowed for 'enabled'. Use `force_enabled` to set a non-boolean.

        """
        # 'force_enabled' wins over 'enabled' when both are supplied.
        if 'force_enabled' in kwargs:
            enabled = kwargs.get('force_enabled', None)
        else:
            enabled = kwargs.get('enabled', None)
        payload = {
            'service_id': service_id,
            'interface': interface,
            'url': url,
            'region': kwargs.get('region', None),
            'enabled': enabled
        }
        resp, raw = self.post('endpoints', json.dumps({'endpoint': payload}))
        self.expected_success(201, resp.status)
        parsed = json.loads(raw)
        return service_client.ResponseBody(resp, parsed['endpoint'])

    def update_endpoint(self, endpoint_id, service_id=None, interface=None,
                        url=None, region=None, enabled=None, **kwargs):
        """Updates an endpoint with given parameters.

        Normally this function wouldn't allow setting values that are not
        allowed for 'enabled'. Use `force_enabled` to set a non-boolean.

        """
        # Only explicitly supplied fields are sent in the PATCH body.
        payload = {}
        for field, value in (('service_id', service_id),
                             ('interface', interface),
                             ('url', url),
                             ('region', region)):
            if value is not None:
                payload[field] = value
        if 'force_enabled' in kwargs:
            payload['enabled'] = kwargs['force_enabled']
        elif enabled is not None:
            payload['enabled'] = enabled
        resp, raw = self.patch('endpoints/%s' % endpoint_id,
                               json.dumps({'endpoint': payload}))
        self.expected_success(200, resp.status)
        parsed = json.loads(raw)
        return service_client.ResponseBody(resp, parsed['endpoint'])

    def delete_endpoint(self, endpoint_id):
        """Delete endpoint."""
        resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
        self.expected_success(204, resp_header.status)
        return service_client.ResponseBody(resp_header, resp_body)
|
|
@ -0,0 +1,523 @@
|
|||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import urllib
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class IdentityV3ClientJSON(service_client.ServiceClient):
|
||||
api_version = "v3"
|
||||
|
||||
def create_user(self, user_name, password=None, project_id=None,
|
||||
email=None, domain_id='default', **kwargs):
|
||||
"""Creates a user."""
|
||||
en = kwargs.get('enabled', True)
|
||||
description = kwargs.get('description', None)
|
||||
default_project_id = kwargs.get('default_project_id')
|
||||
post_body = {
|
||||
'project_id': project_id,
|
||||
'default_project_id': default_project_id,
|
||||
'description': description,
|
||||
'domain_id': domain_id,
|
||||
'email': email,
|
||||
'enabled': en,
|
||||
'name': user_name,
|
||||
'password': password
|
||||
}
|
||||
post_body = json.dumps({'user': post_body})
|
||||
resp, body = self.post('users', post_body)
|
||||
self.expected_success(201, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['user'])
|
||||
|
||||
def update_user(self, user_id, name, **kwargs):
|
||||
"""Updates a user."""
|
||||
body = self.get_user(user_id)
|
||||
email = kwargs.get('email', body['email'])
|
||||
en = kwargs.get('enabled', body['enabled'])
|
||||
project_id = kwargs.get('project_id', body['project_id'])
|
||||
if 'default_project_id' in body.keys():
|
||||
default_project_id = kwargs.get('default_project_id',
|
||||
body['default_project_id'])
|
||||
else:
|
||||
default_project_id = kwargs.get('default_project_id')
|
||||
description = kwargs.get('description', body['description'])
|
||||
domain_id = kwargs.get('domain_id', body['domain_id'])
|
||||
post_body = {
|
||||
'name': name,
|
||||
'email': email,
|
||||
'enabled': en,
|
||||
'project_id': project_id,
|
||||
'default_project_id': default_project_id,
|
||||
'id': user_id,
|
||||
'domain_id': domain_id,
|
||||
'description': description
|
||||
}
|
||||
post_body = json.dumps({'user': post_body})
|
||||
resp, body = self.patch('users/%s' % user_id, post_body)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['user'])
|
||||
|
||||
def update_user_password(self, user_id, password, original_password):
|
||||
"""Updates a user password."""
|
||||
update_user = {
|
||||
'password': password,
|
||||
'original_password': original_password
|
||||
}
|
||||
update_user = json.dumps({'user': update_user})
|
||||
resp, _ = self.post('users/%s/password' % user_id, update_user)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp)
|
||||
|
||||
def list_user_projects(self, user_id):
|
||||
"""Lists the projects on which a user has roles assigned."""
|
||||
resp, body = self.get('users/%s/projects' % user_id)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['projects'])
|
||||
|
||||
def get_users(self, params=None):
|
||||
"""Get the list of users."""
|
||||
url = 'users'
|
||||
if params:
|
||||
url += '?%s' % urllib.urlencode(params)
|
||||
resp, body = self.get(url)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['users'])
|
||||
|
||||
def get_user(self, user_id):
|
||||
"""GET a user."""
|
||||
resp, body = self.get("users/%s" % user_id)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['user'])
|
||||
|
||||
def delete_user(self, user_id):
|
||||
"""Deletes a User."""
|
||||
resp, body = self.delete("users/%s" % user_id)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def create_project(self, name, **kwargs):
|
||||
"""Creates a project."""
|
||||
description = kwargs.get('description', None)
|
||||
en = kwargs.get('enabled', True)
|
||||
domain_id = kwargs.get('domain_id', 'default')
|
||||
post_body = {
|
||||
'description': description,
|
||||
'domain_id': domain_id,
|
||||
'enabled': en,
|
||||
'name': name
|
||||
}
|
||||
post_body = json.dumps({'project': post_body})
|
||||
resp, body = self.post('projects', post_body)
|
||||
self.expected_success(201, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['project'])
|
||||
|
||||
def list_projects(self, params=None):
|
||||
url = "projects"
|
||||
if params:
|
||||
url += '?%s' % urllib.urlencode(params)
|
||||
resp, body = self.get(url)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['projects'])
|
||||
|
||||
def update_project(self, project_id, **kwargs):
|
||||
body = self.get_project(project_id)
|
||||
name = kwargs.get('name', body['name'])
|
||||
desc = kwargs.get('description', body['description'])
|
||||
en = kwargs.get('enabled', body['enabled'])
|
||||
domain_id = kwargs.get('domain_id', body['domain_id'])
|
||||
post_body = {
|
||||
'id': project_id,
|
||||
'name': name,
|
||||
'description': desc,
|
||||
'enabled': en,
|
||||
'domain_id': domain_id,
|
||||
}
|
||||
post_body = json.dumps({'project': post_body})
|
||||
resp, body = self.patch('projects/%s' % project_id, post_body)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['project'])
|
||||
|
||||
def get_project(self, project_id):
|
||||
"""GET a Project."""
|
||||
resp, body = self.get("projects/%s" % project_id)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['project'])
|
||||
|
||||
def delete_project(self, project_id):
|
||||
"""Delete a project."""
|
||||
resp, body = self.delete('projects/%s' % str(project_id))
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def create_role(self, name):
|
||||
"""Create a Role."""
|
||||
post_body = {
|
||||
'name': name
|
||||
}
|
||||
post_body = json.dumps({'role': post_body})
|
||||
resp, body = self.post('roles', post_body)
|
||||
self.expected_success(201, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['role'])
|
||||
|
||||
def get_role(self, role_id):
|
||||
"""GET a Role."""
|
||||
resp, body = self.get('roles/%s' % str(role_id))
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['role'])
|
||||
|
||||
def list_roles(self):
|
||||
"""Get the list of Roles."""
|
||||
resp, body = self.get("roles")
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['roles'])
|
||||
|
||||
def update_role(self, name, role_id):
|
||||
"""Create a Role."""
|
||||
post_body = {
|
||||
'name': name
|
||||
}
|
||||
post_body = json.dumps({'role': post_body})
|
||||
resp, body = self.patch('roles/%s' % str(role_id), post_body)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['role'])
|
||||
|
||||
def delete_role(self, role_id):
|
||||
"""Delete a role."""
|
||||
resp, body = self.delete('roles/%s' % str(role_id))
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def assign_user_role(self, project_id, user_id, role_id):
|
||||
"""Add roles to a user on a project."""
|
||||
resp, body = self.put('projects/%s/users/%s/roles/%s' %
|
||||
(project_id, user_id, role_id), None)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def create_domain(self, name, **kwargs):
|
||||
"""Creates a domain."""
|
||||
description = kwargs.get('description', None)
|
||||
en = kwargs.get('enabled', True)
|
||||
post_body = {
|
||||
'description': description,
|
||||
'enabled': en,
|
||||
'name': name
|
||||
}
|
||||
post_body = json.dumps({'domain': post_body})
|
||||
resp, body = self.post('domains', post_body)
|
||||
self.expected_success(201, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['domain'])
|
||||
|
||||
def delete_domain(self, domain_id):
|
||||
"""Delete a domain."""
|
||||
resp, body = self.delete('domains/%s' % str(domain_id))
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def list_domains(self):
|
||||
"""List Domains."""
|
||||
resp, body = self.get('domains')
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['domains'])
|
||||
|
||||
def update_domain(self, domain_id, **kwargs):
|
||||
"""Updates a domain."""
|
||||
body = self.get_domain(domain_id)
|
||||
description = kwargs.get('description', body['description'])
|
||||
en = kwargs.get('enabled', body['enabled'])
|
||||
name = kwargs.get('name', body['name'])
|
||||
post_body = {
|
||||
'description': description,
|
||||
'enabled': en,
|
||||
'name': name
|
||||
}
|
||||
post_body = json.dumps({'domain': post_body})
|
||||
resp, body = self.patch('domains/%s' % domain_id, post_body)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['domain'])
|
||||
|
||||
def get_domain(self, domain_id):
|
||||
"""Get Domain details."""
|
||||
resp, body = self.get('domains/%s' % domain_id)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['domain'])
|
||||
|
||||
def get_token(self, resp_token):
|
||||
"""Get token details."""
|
||||
headers = {'X-Subject-Token': resp_token}
|
||||
resp, body = self.get("auth/tokens", headers=headers)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['token'])
|
||||
|
||||
def delete_token(self, resp_token):
|
||||
"""Deletes token."""
|
||||
headers = {'X-Subject-Token': resp_token}
|
||||
resp, body = self.delete("auth/tokens", headers=headers)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def create_group(self, name, **kwargs):
|
||||
"""Creates a group."""
|
||||
description = kwargs.get('description', None)
|
||||
domain_id = kwargs.get('domain_id', 'default')
|
||||
project_id = kwargs.get('project_id', None)
|
||||
post_body = {
|
||||
'description': description,
|
||||
'domain_id': domain_id,
|
||||
'project_id': project_id,
|
||||
'name': name
|
||||
}
|
||||
post_body = json.dumps({'group': post_body})
|
||||
resp, body = self.post('groups', post_body)
|
||||
self.expected_success(201, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['group'])
|
||||
|
||||
def get_group(self, group_id):
|
||||
"""Get group details."""
|
||||
resp, body = self.get('groups/%s' % group_id)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['group'])
|
||||
|
||||
def list_groups(self):
|
||||
"""Lists the groups."""
|
||||
resp, body = self.get('groups')
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['groups'])
|
||||
|
||||
def update_group(self, group_id, **kwargs):
|
||||
"""Updates a group."""
|
||||
body = self.get_group(group_id)
|
||||
name = kwargs.get('name', body['name'])
|
||||
description = kwargs.get('description', body['description'])
|
||||
post_body = {
|
||||
'name': name,
|
||||
'description': description
|
||||
}
|
||||
post_body = json.dumps({'group': post_body})
|
||||
resp, body = self.patch('groups/%s' % group_id, post_body)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['group'])
|
||||
|
||||
def delete_group(self, group_id):
|
||||
"""Delete a group."""
|
||||
resp, body = self.delete('groups/%s' % str(group_id))
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def add_group_user(self, group_id, user_id):
|
||||
"""Add user into group."""
|
||||
resp, body = self.put('groups/%s/users/%s' % (group_id, user_id),
|
||||
None)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def list_group_users(self, group_id):
|
||||
"""List users in group."""
|
||||
resp, body = self.get('groups/%s/users' % group_id)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['users'])
|
||||
|
||||
def list_user_groups(self, user_id):
|
||||
"""Lists groups which a user belongs to."""
|
||||
resp, body = self.get('users/%s/groups' % user_id)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['groups'])
|
||||
|
||||
def delete_group_user(self, group_id, user_id):
|
||||
"""Delete user in group."""
|
||||
resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id))
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def assign_user_role_on_project(self, project_id, user_id, role_id):
|
||||
"""Add roles to a user on a project."""
|
||||
resp, body = self.put('projects/%s/users/%s/roles/%s' %
|
||||
(project_id, user_id, role_id), None)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def assign_user_role_on_domain(self, domain_id, user_id, role_id):
|
||||
"""Add roles to a user on a domain."""
|
||||
resp, body = self.put('domains/%s/users/%s/roles/%s' %
|
||||
(domain_id, user_id, role_id), None)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def list_user_roles_on_project(self, project_id, user_id):
|
||||
"""list roles of a user on a project."""
|
||||
resp, body = self.get('projects/%s/users/%s/roles' %
|
||||
(project_id, user_id))
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['roles'])
|
||||
|
||||
def list_user_roles_on_domain(self, domain_id, user_id):
|
||||
"""list roles of a user on a domain."""
|
||||
resp, body = self.get('domains/%s/users/%s/roles' %
|
||||
(domain_id, user_id))
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['roles'])
|
||||
|
||||
def revoke_role_from_user_on_project(self, project_id, user_id, role_id):
|
||||
"""Delete role of a user on a project."""
|
||||
resp, body = self.delete('projects/%s/users/%s/roles/%s' %
|
||||
(project_id, user_id, role_id))
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def revoke_role_from_user_on_domain(self, domain_id, user_id, role_id):
|
||||
"""Delete role of a user on a domain."""
|
||||
resp, body = self.delete('domains/%s/users/%s/roles/%s' %
|
||||
(domain_id, user_id, role_id))
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def assign_group_role_on_project(self, project_id, group_id, role_id):
|
||||
"""Add roles to a user on a project."""
|
||||
resp, body = self.put('projects/%s/groups/%s/roles/%s' %
|
||||
(project_id, group_id, role_id), None)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def assign_group_role_on_domain(self, domain_id, group_id, role_id):
|
||||
"""Add roles to a user on a domain."""
|
||||
resp, body = self.put('domains/%s/groups/%s/roles/%s' %
|
||||
(domain_id, group_id, role_id), None)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def list_group_roles_on_project(self, project_id, group_id):
|
||||
"""list roles of a user on a project."""
|
||||
resp, body = self.get('projects/%s/groups/%s/roles' %
|
||||
(project_id, group_id))
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['roles'])
|
||||
|
||||
def list_group_roles_on_domain(self, domain_id, group_id):
|
||||
"""list roles of a user on a domain."""
|
||||
resp, body = self.get('domains/%s/groups/%s/roles' %
|
||||
(domain_id, group_id))
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['roles'])
|
||||
|
||||
def revoke_role_from_group_on_project(self, project_id, group_id, role_id):
|
||||
"""Delete role of a user on a project."""
|
||||
resp, body = self.delete('projects/%s/groups/%s/roles/%s' %
|
||||
(project_id, group_id, role_id))
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def revoke_role_from_group_on_domain(self, domain_id, group_id, role_id):
|
||||
"""Delete role of a user on a domain."""
|
||||
resp, body = self.delete('domains/%s/groups/%s/roles/%s' %
|
||||
(domain_id, group_id, role_id))
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def create_trust(self, trustor_user_id, trustee_user_id, project_id,
|
||||
role_names, impersonation, expires_at):
|
||||
"""Creates a trust."""
|
||||
roles = [{'name': n} for n in role_names]
|
||||
post_body = {
|
||||
'trustor_user_id': trustor_user_id,
|
||||
'trustee_user_id': trustee_user_id,
|
||||
'project_id': project_id,
|
||||
'impersonation': impersonation,
|
||||
'roles': roles,
|
||||
'expires_at': expires_at
|
||||
}
|
||||
post_body = json.dumps({'trust': post_body})
|
||||
resp, body = self.post('OS-TRUST/trusts', post_body)
|
||||
self.expected_success(201, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['trust'])
|
||||
|
||||
def delete_trust(self, trust_id):
|
||||
"""Deletes a trust."""
|
||||
resp, body = self.delete("OS-TRUST/trusts/%s" % trust_id)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
def get_trusts(self, trustor_user_id=None, trustee_user_id=None):
|
||||
"""GET trusts."""
|
||||
if trustor_user_id:
|
||||
resp, body = self.get("OS-TRUST/trusts?trustor_user_id=%s"
|
||||
% trustor_user_id)
|
||||
elif trustee_user_id:
|
||||
resp, body = self.get("OS-TRUST/trusts?trustee_user_id=%s"
|
||||
% trustee_user_id)
|
||||
else:
|
||||
resp, body = self.get("OS-TRUST/trusts")
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['trusts'])
|
||||
|
||||
def get_trust(self, trust_id):
|
||||
"""GET trust."""
|
||||
resp, body = self.get("OS-TRUST/trusts/%s" % trust_id)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['trust'])
|
||||
|
||||
def get_trust_roles(self, trust_id):
|
||||
"""GET roles delegated by a trust."""
|
||||
resp, body = self.get("OS-TRUST/trusts/%s/roles" % trust_id)
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBodyList(resp, body['roles'])
|
||||
|
||||
def get_trust_role(self, trust_id, role_id):
|
||||
"""GET role delegated by a trust."""
|
||||
resp, body = self.get("OS-TRUST/trusts/%s/roles/%s"
|
||||
% (trust_id, role_id))
|
||||
self.expected_success(200, resp.status)
|
||||
body = json.loads(body)
|
||||
return service_client.ResponseBody(resp, body['role'])
|
||||
|
||||
def check_trust_role(self, trust_id, role_id):
|
||||
"""HEAD Check if role is delegated by a trust."""
|
||||
resp, body = self.head("OS-TRUST/trusts/%s/roles/%s"
|
||||
% (trust_id, role_id))
|
||||
self.expected_success(200, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
|
@ -0,0 +1,69 @@
|
|||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class PolicyClientJSON(service_client.ServiceClient):
    """Identity v3 policy CRUD client."""
    api_version = "v3"

    def create_policy(self, blob, type):
        """Create a policy from a serialized blob and its MIME type."""
        payload = json.dumps({'policy': {"blob": blob, "type": type}})
        resp, raw = self.post('policies', payload)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['policy'])

    def list_policies(self):
        """List all policies."""
        resp, raw = self.get('policies')
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp,
                                               json.loads(raw)['policies'])

    def get_policy(self, policy_id):
        """Show the given policy."""
        resp, raw = self.get('policies/%s' % policy_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['policy'])

    def update_policy(self, policy_id, **kwargs):
        """Update a policy; only the 'type' kwarg is sent."""
        payload = json.dumps({'policy': {'type': kwargs.get('type')}})
        resp, raw = self.patch('policies/%s' % policy_id, payload)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['policy'])

    def delete_policy(self, policy_id):
        """Delete the policy."""
        resp, raw = self.delete("policies/%s" % policy_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, raw)
|
|
@ -0,0 +1,77 @@
|
|||
# Copyright 2014 Hewlett-Packard Development Company, L.P
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import urllib
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class RegionClientJSON(service_client.ServiceClient):
    """Identity v3 region CRUD client."""
    api_version = "v3"

    def create_region(self, description, **kwargs):
        """Create a region.

        Optional kwargs: 'parent_region_id'; 'unique_region_id' (when
        given, the region is created via PUT at that specific id).
        """
        region = {'description': description}
        if kwargs.get('parent_region_id'):
            region['parent_region_id'] = kwargs.get('parent_region_id')
        payload = json.dumps({'region': region})
        if kwargs.get('unique_region_id'):
            resp, raw = self.put(
                'regions/%s' % kwargs.get('unique_region_id'), payload)
        else:
            resp, raw = self.post('regions', payload)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['region'])

    def update_region(self, region_id, **kwargs):
        """Update a region; only explicitly supplied fields are sent."""
        region = {}
        if 'description' in kwargs:
            region['description'] = kwargs.get('description')
        if 'parent_region_id' in kwargs:
            region['parent_region_id'] = kwargs.get('parent_region_id')
        resp, raw = self.patch('regions/%s' % region_id,
                               json.dumps({'region': region}))
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['region'])

    def get_region(self, region_id):
        """Get a region."""
        resp, raw = self.get('regions/%s' % region_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['region'])

    def list_regions(self, params=None):
        """List regions, optionally filtered by query parameters."""
        url = 'regions'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, raw = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp,
                                               json.loads(raw)['regions'])

    def delete_region(self, region_id):
        """Delete a region."""
        resp, raw = self.delete('regions/%s' % region_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, raw)
|
|
@ -0,0 +1,73 @@
|
|||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class ServiceClientJSON(service_client.ServiceClient):
    """Identity v3 service-catalog entry client."""
    api_version = "v3"

    def update_service(self, service_id, **kwargs):
        """Update a service; unspecified fields keep their current values."""
        current = self.get_service(service_id)
        patch = {
            'description': kwargs.get('description', current['description']),
            'type': kwargs.get('type', current['type']),
            'name': kwargs.get('name', current['name'])
        }
        resp, raw = self.patch('services/%s' % service_id,
                               json.dumps({'service': patch}))
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['service'])

    def get_service(self, service_id):
        """Get a single service entry."""
        resp, raw = self.get('services/%s' % service_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['service'])

    def create_service(self, serv_type, name=None, description=None,
                       enabled=True):
        """Create a service-catalog entry of the given type."""
        payload = json.dumps({'service': {
            'name': name,
            'type': serv_type,
            'enabled': enabled,
            'description': description,
        }})
        resp, raw = self.post("services", payload)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)["service"])

    def delete_service(self, serv_id):
        """Delete a service-catalog entry."""
        resp, raw = self.delete("services/" + serv_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, raw)

    def list_services(self):
        """List all service-catalog entries."""
        resp, raw = self.get('services')
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp,
                                               json.loads(raw)['services'])
|
|
@ -0,0 +1,172 @@
|
|||
# Copyright 2015 NEC Corporation. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
from tempest_lib.common import rest_client
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
|
||||
|
||||
class V3TokenClientJSON(rest_client.RestClient):
    """Client for the Keystone v3 token endpoint (/auth/tokens)."""

    def __init__(self, auth_url, disable_ssl_certificate_validation=None,
                 ca_certs=None, trace_requests=None):
        dscv = disable_ssl_certificate_validation
        super(V3TokenClientJSON, self).__init__(
            None, None, None, disable_ssl_certificate_validation=dscv,
            ca_certs=ca_certs, trace_requests=trace_requests)
        if not auth_url:
            raise exceptions.InvalidConfiguration('you must specify a v3 uri '
                                                  'if using the v3 identity '
                                                  'api')
        # Normalize the endpoint so callers may pass either the bare v3
        # URI or the full /auth/tokens URL.
        if 'auth/tokens' not in auth_url:
            auth_url = auth_url.rstrip('/') + '/auth/tokens'

        self.auth_url = auth_url

    def auth(self, user_id=None, username=None, password=None, project_id=None,
             project_name=None, user_domain_id=None, user_domain_name=None,
             project_domain_id=None, project_domain_name=None, domain_id=None,
             domain_name=None, token=None):
        """
        :param user_id: user id
        :param username: user name
        :param user_domain_id: the user domain id
        :param user_domain_name: the user domain name
        :param project_domain_id: the project domain id
        :param project_domain_name: the project domain name
        :param domain_id: a domain id to scope to
        :param domain_name: a domain name to scope to
        :param project_id: a project id to scope to
        :param project_name: a project name to scope to
        :param token: a token to re-scope.

        Accepts different combinations of credentials.
        Sample valid combinations:
        - token
        - token, project_name, project_domain_id
        - user_id, password
        - username, password, user_domain_id
        - username, password, project_name, user_domain_id, project_domain_id
        Validation is left to the server side.
        """
        creds = {
            'auth': {
                'identity': {
                    'methods': [],
                }
            }
        }
        id_obj = creds['auth']['identity']
        if token:
            id_obj['methods'].append('token')
            id_obj['token'] = {
                'id': token
            }

        if (user_id or username) and password:
            id_obj['methods'].append('password')
            id_obj['password'] = {
                'user': {
                    'password': password,
                }
            }
            if user_id:
                id_obj['password']['user']['id'] = user_id
            else:
                id_obj['password']['user']['name'] = username

            # The user's domain only applies to password authentication.
            _domain = None
            if user_domain_id is not None:
                _domain = dict(id=user_domain_id)
            elif user_domain_name is not None:
                _domain = dict(name=user_domain_name)
            if _domain:
                id_obj['password']['user']['domain'] = _domain

        if (project_id or project_name):
            _project = dict()

            if project_id:
                _project['id'] = project_id
            elif project_name:
                _project['name'] = project_name

            if project_domain_id is not None:
                _project['domain'] = {'id': project_domain_id}
            elif project_domain_name is not None:
                _project['domain'] = {'name': project_domain_name}

            creds['auth']['scope'] = dict(project=_project)
        elif domain_id:
            creds['auth']['scope'] = dict(domain={'id': domain_id})
        elif domain_name:
            creds['auth']['scope'] = dict(domain={'name': domain_name})

        body = json.dumps(creds)
        resp, body = self.post(self.auth_url, body=body)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, body)

    def request(self, method, url, extra_headers=False, headers=None,
                body=None):
        """A simple HTTP request interface.

        Maps 401/403 responses to Unauthorized and any status outside
        200/201/204 to IdentityError.
        """
        if headers is None:
            # Always accept 'json', for xml token client too.
            # Because XML response is not easily
            # converted to the corresponding JSON one
            headers = self.get_headers(accept_type="json")
        elif extra_headers:
            try:
                headers.update(self.get_headers(accept_type="json"))
            except (ValueError, TypeError):
                headers = self.get_headers(accept_type="json")

        resp, resp_body = self.raw_request(url, method,
                                           headers=headers, body=body)
        self._log_request(method, url, resp)

        if resp.status in [401, 403]:
            resp_body = json.loads(resp_body)
            raise lib_exc.Unauthorized(resp_body['error']['message'])
        elif resp.status not in [200, 201, 204]:
            raise exceptions.IdentityError(
                'Unexpected status code {0}'.format(resp.status))

        return resp, json.loads(resp_body)

    def get_token(self, **kwargs):
        """
        Returns (token id, token data) for supplied credentials.

        When 'auth_data' is truthy in kwargs, returns the tuple
        (token, token body); otherwise just the token id. Missing user
        and project domains default to 'Default'.
        """
        auth_data = kwargs.pop('auth_data', False)

        if not (kwargs.get('user_domain_id') or
                kwargs.get('user_domain_name')):
            kwargs['user_domain_name'] = 'Default'

        if not (kwargs.get('project_domain_id') or
                kwargs.get('project_domain_name')):
            kwargs['project_domain_name'] = 'Default'

        body = self.auth(**kwargs)

        # Keystone v3 returns the token id in the X-Subject-Token header.
        token = body.response.get('x-subject-token')
        if auth_data:
            return token, body['token']
        else:
            return token
|
|
@ -0,0 +1,622 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import time
|
||||
import urllib
|
||||
|
||||
from tempest_lib.common.utils import misc
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
|
||||
|
||||
class NetworkClientJSON(service_client.ServiceClient):
|
||||
|
||||
"""
|
||||
Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
|
||||
V1 API has been removed from the code base.
|
||||
|
||||
Implements create, delete, update, list and show for the basic Neutron
|
||||
abstractions (networks, sub-networks, routers, ports and floating IP):
|
||||
|
||||
Implements add/remove interface to router using subnet ID / port ID
|
||||
|
||||
It also implements list, show, update and reset for OpenStack Networking
|
||||
quotas
|
||||
"""
|
||||
|
||||
version = '2.0'
|
||||
uri_prefix = "v2.0"
|
||||
|
||||
def get_uri(self, plural_name):
|
||||
# get service prefix from resource name
|
||||
|
||||
# The following list represents resource names that do not require
|
||||
# changing underscore to a hyphen
|
||||
hyphen_exceptions = ["health_monitors", "firewall_rules",
|
||||
"firewall_policies"]
|
||||
# the following map is used to construct proper URI
|
||||
# for the given neutron resource
|
||||
service_resource_prefix_map = {
|
||||
'networks': '',
|
||||
'subnets': '',
|
||||
'subnetpools': '',
|
||||
'ports': '',
|
||||
'pools': 'lb',
|
||||
'vips': 'lb',
|
||||
'health_monitors': 'lb',
|
||||
'members': 'lb',
|
||||
'ipsecpolicies': 'vpn',
|
||||
'vpnservices': 'vpn',
|
||||
'ikepolicies': 'vpn',
|
||||
'ipsec-site-connections': 'vpn',
|
||||
'metering_labels': 'metering',
|
||||
'metering_label_rules': 'metering',
|
||||
'firewall_rules': 'fw',
|
||||
'firewall_policies': 'fw',
|
||||
'firewalls': 'fw'
|
||||
}
|
||||
service_prefix = service_resource_prefix_map.get(
|
||||
plural_name)
|
||||
if plural_name not in hyphen_exceptions:
|
||||
plural_name = plural_name.replace("_", "-")
|
||||
if service_prefix:
|
||||
uri = '%s/%s/%s' % (self.uri_prefix, service_prefix,
|
||||
plural_name)
|
||||
else:
|
||||
uri = '%s/%s' % (self.uri_prefix, plural_name)
|
||||
return uri
|
||||
|
||||
def pluralize(self, resource_name):
|
||||
# get plural from map or just add 's'
|
||||
|
||||
# map from resource name to a plural name
|
||||
# needed only for those which can't be constructed as name + 's'
|
||||
resource_plural_map = {
|
||||
'security_groups': 'security_groups',
|
||||
'security_group_rules': 'security_group_rules',
|
||||
'ipsecpolicy': 'ipsecpolicies',
|
||||
'ikepolicy': 'ikepolicies',
|
||||
'ipsec_site_connection': 'ipsec-site-connections',
|
||||
'quotas': 'quotas',
|
||||
'firewall_policy': 'firewall_policies'
|
||||
}
|
||||
return resource_plural_map.get(resource_name, resource_name + 's')
|
||||
|
||||
def _lister(self, plural_name):
|
||||
def _list(**filters):
|
||||
uri = self.get_uri(plural_name)
|
||||
if filters:
|
||||
uri += '?' + urllib.urlencode(filters, doseq=1)
|
||||
resp, body = self.get(uri)
|
||||
result = {plural_name: self.deserialize_list(body)}
|
||||
self.expected_success(200, resp.status)
|
||||
return service_client.ResponseBody(resp, result)
|
||||
|
||||
return _list
|
||||
|
||||
def _deleter(self, resource_name):
|
||||
def _delete(resource_id):
|
||||
plural = self.pluralize(resource_name)
|
||||
uri = '%s/%s' % (self.get_uri(plural), resource_id)
|
||||
resp, body = self.delete(uri)
|
||||
self.expected_success(204, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
return _delete
|
||||
|
||||
    def _shower(self, resource_name):
        """Build a show_<resource> method for ``resource_name``.

        The returned callable issues GET on the member URI and expects
        HTTP 200.
        """
        def _show(resource_id, **fields):
            # fields is a dict which key is 'fields' and value is a
            # list of field's name. An example:
            # {'fields': ['id', 'name']}
            plural = self.pluralize(resource_name)
            uri = '%s/%s' % (self.get_uri(plural), resource_id)
            if fields:
                uri += '?' + urllib.urlencode(fields, doseq=1)
            resp, body = self.get(uri)
            body = self.deserialize_single(body)
            self.expected_success(200, resp.status)
            return service_client.ResponseBody(resp, body)

        return _show
|
||||
|
||||
    def _creater(self, resource_name):
        """Build a create_<resource> method for ``resource_name``.

        The returned callable POSTs {resource_name: kwargs} to the
        collection URI and expects HTTP 201.
        """
        def _create(**kwargs):
            plural = self.pluralize(resource_name)
            uri = self.get_uri(plural)
            post_data = self.serialize({resource_name: kwargs})
            resp, body = self.post(uri, post_data)
            body = self.deserialize_single(body)
            self.expected_success(201, resp.status)
            return service_client.ResponseBody(resp, body)

        return _create
|
||||
|
||||
    def _updater(self, resource_name):
        """Build an update_<resource> method for ``resource_name``.

        The returned callable PUTs {resource_name: kwargs} to the member
        URI and expects HTTP 200.
        """
        def _update(res_id, **kwargs):
            plural = self.pluralize(resource_name)
            uri = '%s/%s' % (self.get_uri(plural), res_id)
            post_data = self.serialize({resource_name: kwargs})
            resp, body = self.put(uri, post_data)
            body = self.deserialize_single(body)
            self.expected_success(200, resp.status)
            return service_client.ResponseBody(resp, body)

        return _update
|
||||
|
||||
def __getattr__(self, name):
|
||||
method_prefixes = ["list_", "delete_", "show_", "create_", "update_"]
|
||||
method_functors = [self._lister,
|
||||
self._deleter,
|
||||
self._shower,
|
||||
self._creater,
|
||||
self._updater]
|
||||
for index, prefix in enumerate(method_prefixes):
|
||||
prefix_len = len(prefix)
|
||||
if name[:prefix_len] == prefix:
|
||||
return method_functors[index](name[prefix_len:])
|
||||
raise AttributeError(name)
|
||||
|
||||
# Subnetpool methods
|
||||
    # Subnetpool methods
    def create_subnetpool(self, post_data):
        """Create a subnet pool (POST /subnetpools); expects HTTP 201."""
        body = self.serialize_list(post_data, "subnetpools", "subnetpool")
        uri = self.get_uri("subnetpools")
        resp, body = self.post(uri, body)
        body = {'subnetpool': self.deserialize_list(body)}
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def get_subnetpool(self, id):
        """Show one subnet pool (GET /subnetpools/<id>); expects HTTP 200."""
        uri = self.get_uri("subnetpools")
        subnetpool_uri = '%s/%s' % (uri, id)
        resp, body = self.get(subnetpool_uri)
        body = {'subnetpool': self.deserialize_list(body)}
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def delete_subnetpool(self, id):
        """Delete a subnet pool (DELETE /subnetpools/<id>); expects HTTP 204."""
        uri = self.get_uri("subnetpools")
        subnetpool_uri = '%s/%s' % (uri, id)
        resp, body = self.delete(subnetpool_uri)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def list_subnetpools(self):
        """List subnet pools (GET /subnetpools); expects HTTP 200."""
        uri = self.get_uri("subnetpools")
        resp, body = self.get(uri)
        body = {'subnetpools': self.deserialize_list(body)}
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def update_subnetpool(self, id, post_data):
        """Update a subnet pool (PUT /subnetpools/<id>); expects HTTP 200."""
        body = self.serialize_list(post_data, "subnetpools", "subnetpool")
        uri = self.get_uri("subnetpools")
        subnetpool_uri = '%s/%s' % (uri, id)
        resp, body = self.put(subnetpool_uri, body)
        body = {'subnetpool': self.deserialize_list(body)}
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
# Common methods that are hard to automate
|
||||
def create_bulk_network(self, names, shared=False):
|
||||
network_list = [{'name': name, 'shared': shared} for name in names]
|
||||
post_data = {'networks': network_list}
|
||||
body = self.serialize_list(post_data, "networks", "network")
|
||||
uri = self.get_uri("networks")
|
||||
resp, body = self.post(uri, body)
|
||||
body = {'networks': self.deserialize_list(body)}
|
||||
self.expected_success(201, resp.status)
|
||||
return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def create_bulk_subnet(self, subnet_list):
        """Create several subnets in one bulk POST; expects HTTP 201."""
        post_data = {'subnets': subnet_list}
        body = self.serialize_list(post_data, 'subnets', 'subnet')
        uri = self.get_uri('subnets')
        resp, body = self.post(uri, body)
        body = {'subnets': self.deserialize_list(body)}
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def create_bulk_port(self, port_list):
        """Create several ports in one bulk POST; expects HTTP 201."""
        post_data = {'ports': port_list}
        body = self.serialize_list(post_data, 'ports', 'port')
        uri = self.get_uri('ports')
        resp, body = self.post(uri, body)
        body = {'ports': self.deserialize_list(body)}
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def wait_for_resource_deletion(self, resource_type, id):
        """Waits for a resource to be deleted.

        Polls is_resource_deleted() every self.build_interval seconds and
        raises exceptions.TimeoutException after self.build_timeout seconds.
        """
        start_time = int(time.time())
        while True:
            if self.is_resource_deleted(resource_type, id):
                return
            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException
            time.sleep(self.build_interval)
|
||||
|
||||
    def is_resource_deleted(self, resource_type, id):
        """Return True if show_<resource_type> raises NotFound, else False.

        NOTE(review): an unknown resource type raises a bare Exception;
        callers may rely on that, so it is left unchanged.
        """
        method = 'show_' + resource_type
        try:
            getattr(self, method)(id)
        except AttributeError:
            raise Exception("Unknown resource type %s " % resource_type)
        except lib_exc.NotFound:
            return True
        return False
|
||||
|
||||
def wait_for_resource_status(self, fetch, status, interval=None,
|
||||
timeout=None):
|
||||
"""
|
||||
@summary: Waits for a network resource to reach a status
|
||||
@param fetch: the callable to be used to query the resource status
|
||||
@type fecth: callable that takes no parameters and returns the resource
|
||||
@param status: the status that the resource has to reach
|
||||
@type status: String
|
||||
@param interval: the number of seconds to wait between each status
|
||||
query
|
||||
@type interval: Integer
|
||||
@param timeout: the maximum number of seconds to wait for the resource
|
||||
to reach the desired status
|
||||
@type timeout: Integer
|
||||
"""
|
||||
if not interval:
|
||||
interval = self.build_interval
|
||||
if not timeout:
|
||||
timeout = self.build_timeout
|
||||
start_time = time.time()
|
||||
|
||||
while time.time() - start_time <= timeout:
|
||||
resource = fetch()
|
||||
if resource['status'] == status:
|
||||
return
|
||||
time.sleep(interval)
|
||||
|
||||
# At this point, the wait has timed out
|
||||
message = 'Resource %s' % (str(resource))
|
||||
message += ' failed to reach status %s' % status
|
||||
message += ' (current: %s)' % resource['status']
|
||||
message += ' within the required time %s' % timeout
|
||||
caller = misc.find_test_caller()
|
||||
if caller:
|
||||
message = '(%s) %s' % (caller, message)
|
||||
raise exceptions.TimeoutException(message)
|
||||
|
||||
def deserialize_single(self, body):
|
||||
return json.loads(body)
|
||||
|
||||
def deserialize_list(self, body):
|
||||
res = json.loads(body)
|
||||
# expecting response in form
|
||||
# {'resources': [ res1, res2] } => when pagination disabled
|
||||
# {'resources': [..], 'resources_links': {}} => if pagination enabled
|
||||
for k in res.keys():
|
||||
if k.endswith("_links"):
|
||||
continue
|
||||
return res[k]
|
||||
|
||||
def serialize(self, data):
|
||||
return json.dumps(data)
|
||||
|
||||
    def serialize_list(self, data, root=None, item=None):
        """JSON-encode ``data``; ``root``/``item`` are accepted but unused.

        The extra parameters exist so callers can pass element names (they
        would matter for an XML serializer), but this JSON client ignores
        them.
        """
        return self.serialize(data)
|
||||
|
||||
    def update_quotas(self, tenant_id, **kwargs):
        """Update a tenant's quotas (PUT /quotas/<tenant_id>); expects 200.

        Returns only the 'quota' payload of the response.
        """
        put_body = {'quota': kwargs}
        body = json.dumps(put_body)
        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['quota'])
|
||||
|
||||
    def reset_quotas(self, tenant_id):
        """Reset a tenant's quotas to defaults (DELETE /quotas/<tenant_id>)."""
        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def create_router(self, name, admin_state_up=True, **kwargs):
        """Create a router (POST /routers); expects HTTP 201.

        ``name`` and ``admin_state_up`` override any same-named keys passed
        in ``kwargs``.
        """
        post_body = {'router': kwargs}
        post_body['router']['name'] = name
        post_body['router']['admin_state_up'] = admin_state_up
        body = json.dumps(post_body)
        uri = '%s/routers' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def _update_router(self, router_id, set_enable_snat, **kwargs):
        """Read-modify-write a router (GET then PUT /routers/<id>).

        Current values are fetched first so unspecified fields keep their
        server-side values.  When ``set_enable_snat`` is False the
        'enable_snat' key is stripped from the gateway info before the PUT,
        because non-admin callers are not allowed to set it.
        """
        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        update_body = {}
        update_body['name'] = kwargs.get('name', body['router']['name'])
        update_body['admin_state_up'] = kwargs.get(
            'admin_state_up', body['router']['admin_state_up'])
        cur_gw_info = body['router']['external_gateway_info']
        if cur_gw_info:
            # TODO(kevinbenton): setting the external gateway info is not
            # allowed for a regular tenant. If the ability to update is also
            # merged, a test case for this will need to be added similar to
            # the SNAT case.
            cur_gw_info.pop('external_fixed_ips', None)
            if not set_enable_snat:
                cur_gw_info.pop('enable_snat', None)
        update_body['external_gateway_info'] = kwargs.get(
            'external_gateway_info', body['router']['external_gateway_info'])
        if 'distributed' in kwargs:
            update_body['distributed'] = kwargs['distributed']
        update_body = dict(router=update_body)
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def update_router(self, router_id, **kwargs):
        """Update a router leaving enable_snat to its default value."""
        # If external_gateway_info contains enable_snat the request will fail
        # with 404 unless executed with admin client, and therefore we instruct
        # _update_router to not set this attribute
        # NOTE(salv-orlando): The above applies as long as Neutron's default
        # policy is to restrict enable_snat usage to admins only.
        return self._update_router(router_id, set_enable_snat=False, **kwargs)
|
||||
|
||||
    def update_router_with_snat_gw_info(self, router_id, **kwargs):
        """Update a router passing also the enable_snat attribute.

        This method must be execute with admin credentials, otherwise the API
        call will return a 404 error.
        """
        return self._update_router(router_id, set_enable_snat=True, **kwargs)
|
||||
|
||||
    def add_router_interface_with_subnet_id(self, router_id, subnet_id):
        """Attach a subnet to a router (PUT .../add_router_interface)."""
        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
              router_id)
        update_body = {"subnet_id": subnet_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def add_router_interface_with_port_id(self, router_id, port_id):
        """Attach an existing port to a router (PUT .../add_router_interface)."""
        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
              router_id)
        update_body = {"port_id": port_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
        """Detach a subnet from a router (PUT .../remove_router_interface)."""
        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
              router_id)
        update_body = {"subnet_id": subnet_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def remove_router_interface_with_port_id(self, router_id, port_id):
        """Detach a port from a router (PUT .../remove_router_interface)."""
        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
              router_id)
        update_body = {"port_id": port_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def associate_health_monitor_with_pool(self, health_monitor_id,
                                           pool_id):
        """Associate an LBaaS v1 health monitor with a pool; expects 201."""
        post_body = {
            "health_monitor": {
                "id": health_monitor_id,
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/pools/%s/health_monitors' % (self.uri_prefix,
              pool_id)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def disassociate_health_monitor_with_pool(self, health_monitor_id,
                                              pool_id):
        """Remove a health monitor association from a pool; expects 204."""
        uri = '%s/lb/pools/%s/health_monitors/%s' % (self.uri_prefix, pool_id,
              health_monitor_id)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def list_router_interfaces(self, uuid):
        """List ports whose device_id is the given router uuid; expects 200."""
        uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def update_agent(self, agent_id, agent_info):
        """Update an agent (PUT /agents/<id>); expects HTTP 200.

        :param agent_info: Agent update information.
                           E.g {"admin_state_up": True}
        """
        uri = '%s/agents/%s' % (self.uri_prefix, agent_id)
        agent = {"agent": agent_info}
        body = json.dumps(agent)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def list_pools_hosted_by_one_lbaas_agent(self, agent_id):
        """List LBaaS v1 pools scheduled to one agent; expects HTTP 200."""
        uri = '%s/agents/%s/loadbalancer-pools' % (self.uri_prefix, agent_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def show_lbaas_agent_hosting_pool(self, pool_id):
        """Show the LBaaS agent hosting a given pool; expects HTTP 200."""
        uri = ('%s/lb/pools/%s/loadbalancer-agent' %
               (self.uri_prefix, pool_id))
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def list_routers_on_l3_agent(self, agent_id):
        """List routers scheduled to an L3 agent; expects HTTP 200."""
        uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def list_l3_agents_hosting_router(self, router_id):
        """List L3 agents hosting a router; expects HTTP 200."""
        uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def add_router_to_l3_agent(self, agent_id, router_id):
        """Schedule a router onto an L3 agent; expects HTTP 201."""
        uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
        post_body = {"router_id": router_id}
        body = json.dumps(post_body)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def remove_router_from_l3_agent(self, agent_id, router_id):
        """Unschedule a router from an L3 agent; expects HTTP 204."""
        uri = '%s/agents/%s/l3-routers/%s' % (
            self.uri_prefix, agent_id, router_id)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def list_dhcp_agent_hosting_network(self, network_id):
        """List DHCP agents hosting a network; expects HTTP 200."""
        uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
        """List networks scheduled to a DHCP agent; expects HTTP 200."""
        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def remove_network_from_dhcp_agent(self, agent_id, network_id):
        """Unschedule a network from a DHCP agent; expects HTTP 204."""
        uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id,
              network_id)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def create_ikepolicy(self, name, **kwargs):
        """Create a VPN IKE policy (POST /vpn/ikepolicies); expects 201.

        ``name`` is set first, then any extra attributes from ``kwargs``
        are merged in (so a 'name' key in kwargs would win).
        """
        post_body = {
            "ikepolicy": {
                "name": name,
            }
        }
        for key, val in kwargs.items():
            post_body['ikepolicy'][key] = val
        body = json.dumps(post_body)
        uri = '%s/vpn/ikepolicies' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def update_extra_routes(self, router_id, nexthop, destination):
        """Replace a router's extra routes with one nexthop/destination pair."""
        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
        put_body = {
            'router': {
                'routes': [{'nexthop': nexthop,
                            "destination": destination}]
            }
        }
        body = json.dumps(put_body)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def delete_extra_routes(self, router_id):
        """Clear a router's extra routes by PUTting routes=null; expects 200."""
        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
        # Sending an explicit JSON null for 'routes' tells the server to
        # drop all extra routes.
        null_routes = None
        put_body = {
            'router': {
                'routes': null_routes
            }
        }
        body = json.dumps(put_body)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def list_lb_pool_stats(self, pool_id):
        """Fetch LBaaS v1 pool statistics (GET /lb/pools/<id>/stats)."""
        uri = '%s/lb/pools/%s/stats' % (self.uri_prefix, pool_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def add_dhcp_agent_to_network(self, agent_id, network_id):
        """Schedule a network onto a DHCP agent; expects HTTP 201."""
        post_body = {'network_id': network_id}
        body = json.dumps(post_body)
        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def insert_firewall_rule_in_policy(self, firewall_policy_id,
                                       firewall_rule_id, insert_after="",
                                       insert_before=""):
        """Insert a firewall rule into a policy at a given position.

        ``insert_after``/``insert_before`` are rule ids; empty strings let
        the server pick the default position.
        """
        uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
              firewall_policy_id)
        body = {
            "firewall_rule_id": firewall_rule_id,
            "insert_after": insert_after,
            "insert_before": insert_before
        }
        body = json.dumps(body)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
||||
|
||||
    def remove_firewall_rule_from_policy(self, firewall_policy_id,
                                         firewall_rule_id):
        """Remove a firewall rule from a policy; expects HTTP 200."""
        uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
              firewall_policy_id)
        update_body = {"firewall_rule_id": firewall_rule_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
|
@ -0,0 +1,189 @@
|
|||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
|
||||
import six
|
||||
|
||||
|
||||
class AttributeDict(dict):

    """
    Provide attribute access (dict.key) to dictionary values.
    """

    def __getattr__(self, name):
        """Resolve missing attributes against the dict's own keys.

        Only invoked when normal attribute lookup fails; a key hit returns
        the stored value, otherwise the standard AttributeError path runs.
        """
        try:
            return self[name]
        except KeyError:
            return super(AttributeDict, self).__getattribute__(name)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class DeletableResource(AttributeDict):

    """
    Support deletion of neutron resources (networks, subnets) via a
    delete() method, as is supported by keystone and nova resources.

    Subclasses must implement delete() and refresh(); 'client' is the
    network client used to perform the API calls.
    """

    def __init__(self, *args, **kwargs):
        # Pop 'client' so it is stored as an attribute rather than a
        # dict entry visible to AttributeDict lookups.
        self.client = kwargs.pop('client', None)
        super(DeletableResource, self).__init__(*args, **kwargs)

    def __str__(self):
        return '<%s id="%s" name="%s">' % (self.__class__.__name__,
                                           self.id, self.name)

    @abc.abstractmethod
    def delete(self):
        return

    @abc.abstractmethod
    def refresh(self):
        return

    def __hash__(self):
        # Hash on the resource id so instances can live in sets/dicts even
        # though the underlying dict is mutable.
        return hash(self.id)

    def wait_for_status(self, status):
        """Block until the resource reports ``status`` (no-op if the
        resource carries no 'status' field)."""
        if not hasattr(self, 'status'):
            return

        def helper_get():
            self.refresh()
            return self

        return self.client.wait_for_resource_status(helper_get, status)
|
||||
|
||||
|
||||
class DeletableNetwork(DeletableResource):
    """Network resource wrapper; delete() removes it via the client."""

    def delete(self):
        self.client.delete_network(self.id)
|
||||
|
||||
|
||||
class DeletableSubnet(DeletableResource):
    """Subnet wrapper tracking router attachments so delete() can detach
    all interfaces before removing the subnet."""

    def __init__(self, *args, **kwargs):
        super(DeletableSubnet, self).__init__(*args, **kwargs)
        # Router ids this subnet has been attached to via add_to_router().
        self._router_ids = set()

    def update(self, *args, **kwargs):
        result = self.client.update_subnet(self.id,
                                           *args,
                                           **kwargs)
        # NOTE(review): dict.update() returns None, so this method's return
        # value is always None; the local state is updated in place.
        return super(DeletableSubnet, self).update(**result['subnet'])

    def add_to_router(self, router_id):
        self._router_ids.add(router_id)
        self.client.add_router_interface_with_subnet_id(router_id,
                                                        subnet_id=self.id)

    def delete(self):
        # Detach from every router first; iterate over a copy since the set
        # is mutated inside the loop.
        for router_id in self._router_ids.copy():
            self.client.remove_router_interface_with_subnet_id(
                router_id,
                subnet_id=self.id)
            self._router_ids.remove(router_id)
        self.client.delete_subnet(self.id)
|
||||
|
||||
|
||||
class DeletableRouter(DeletableResource):
    """Router wrapper; delete() clears the gateway before removal."""

    def set_gateway(self, network_id):
        return self.update(external_gateway_info=dict(network_id=network_id))

    def unset_gateway(self):
        return self.update(external_gateway_info=dict())

    def update(self, *args, **kwargs):
        result = self.client.update_router(self.id,
                                           *args,
                                           **kwargs)
        # NOTE(review): dict.update() returns None (see DeletableSubnet).
        return super(DeletableRouter, self).update(**result['router'])

    def delete(self):
        # The gateway must be unset before the router can be deleted.
        self.unset_gateway()
        self.client.delete_router(self.id)
|
||||
|
||||
|
||||
class DeletableFloatingIp(DeletableResource):
    """Floating-IP wrapper with refresh()/update() syncing local state
    from the API response."""

    def refresh(self, *args, **kwargs):
        result = self.client.show_floatingip(self.id,
                                             *args,
                                             **kwargs)
        super(DeletableFloatingIp, self).update(**result['floatingip'])

    def update(self, *args, **kwargs):
        result = self.client.update_floatingip(self.id,
                                               *args,
                                               **kwargs)
        super(DeletableFloatingIp, self).update(**result['floatingip'])

    def __repr__(self):
        return '<%s addr="%s">' % (self.__class__.__name__,
                                   self.floating_ip_address)

    def __str__(self):
        return '<"FloatingIP" addr="%s" id="%s">' % (self.floating_ip_address,
                                                     self.id)

    def delete(self):
        self.client.delete_floatingip(self.id)
|
||||
|
||||
|
||||
class DeletablePort(DeletableResource):
    """Port resource wrapper; delete() removes it via the client."""

    def delete(self):
        self.client.delete_port(self.id)
|
||||
|
||||
|
||||
class DeletableSecurityGroup(DeletableResource):
    """Security-group wrapper; delete() removes it via the client."""

    def delete(self):
        self.client.delete_security_group(self.id)
|
||||
|
||||
|
||||
class DeletableSecurityGroupRule(DeletableResource):
    """Security-group-rule wrapper; repr omits the (absent) name field."""

    def __repr__(self):
        return '<%s id="%s">' % (self.__class__.__name__, self.id)

    def delete(self):
        self.client.delete_security_group_rule(self.id)
|
||||
|
||||
|
||||
class DeletablePool(DeletableResource):
    """LBaaS v1 pool wrapper; delete() removes it via the client."""

    def delete(self):
        self.client.delete_pool(self.id)
|
||||
|
||||
|
||||
class DeletableMember(DeletableResource):
    """LBaaS v1 member wrapper; delete() removes it via the client."""

    def delete(self):
        self.client.delete_member(self.id)
|
||||
|
||||
|
||||
class DeletableVip(DeletableResource):
    """LBaaS v1 VIP wrapper supporting delete() and refresh()."""

    def delete(self):
        self.client.delete_vip(self.id)

    def refresh(self):
        result = self.client.show_vip(self.id)
        super(DeletableVip, self).update(**result['vip'])
|
|
@ -0,0 +1,827 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import atexit
|
||||
import functools
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import urllib
|
||||
import uuid
|
||||
|
||||
import fixtures
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import importutils
|
||||
import six
|
||||
import testscenarios
|
||||
import testtools
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import clients
|
||||
from neutron_lbaas.tests.tempest.lib.common import credentials
|
||||
from neutron_lbaas.tests.tempest.lib.common import fixed_network
|
||||
import neutron_lbaas.tests.tempest.lib.common.generator.valid_generator as valid
|
||||
import neutron_lbaas.tests.tempest.lib.common.validation_resources as vresources
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
def attr(*args, **kwargs):
    """A decorator which applies the testtools attr decorator

    This decorator applies the testtools.testcase.attr if it is in the list of
    attributes to testtools we want to apply.

    ``type`` may be a single string or a list of strings; each becomes a
    testtools attribute on the decorated test.
    """

    def decorator(f):
        if 'type' in kwargs and isinstance(kwargs['type'], str):
            f = testtools.testcase.attr(kwargs['type'])(f)
        elif 'type' in kwargs and isinstance(kwargs['type'], list):
            # NOTE: the loop variable shadows this module-level 'attr' name
            # inside the loop body only; harmless but easy to misread.
            for attr in kwargs['type']:
                f = testtools.testcase.attr(attr)(f)
        return f

    return decorator
|
||||
|
||||
|
||||
def idempotent_id(id):
    """Stub for metadata decorator

    Validates that ``id`` is a string-formatted UUID (raising
    TypeError/ValueError otherwise), tags the test with 'id-<uuid>' and
    prepends the id to the docstring.
    """
    if not isinstance(id, six.string_types):
        raise TypeError('Test idempotent_id must be string not %s'
                        '' % type(id).__name__)
    # Raises ValueError if the id is not a well-formed UUID.
    uuid.UUID(id)

    def decorator(f):
        f = testtools.testcase.attr('id-%s' % id)(f)
        if f.__doc__:
            f.__doc__ = 'Test idempotent id: %s\n%s' % (id, f.__doc__)
        else:
            f.__doc__ = 'Test idempotent id: %s' % id
        return f
    return decorator
|
||||
|
||||
|
||||
def get_service_list():
    """Map service tag names to their availability flags from CONF."""
    service_list = {
        'compute': CONF.service_available.nova,
        'image': CONF.service_available.glance,
        'baremetal': CONF.service_available.ironic,
        'volume': CONF.service_available.cinder,
        'orchestration': CONF.service_available.heat,
        # NOTE(mtreinish) nova-network will provide networking functionality
        # if neutron isn't available, so always set to True.
        'network': True,
        'identity': True,
        'object_storage': CONF.service_available.swift,
        'dashboard': CONF.service_available.horizon,
        'telemetry': CONF.service_available.ceilometer,
        'data_processing': CONF.service_available.sahara,
        'database': CONF.service_available.trove
    }
    return service_list
|
||||
|
||||
|
||||
def services(*args, **kwargs):
    """A decorator used to set an attr for each service used in a test case

    This decorator applies a testtools attr for each service that gets
    exercised by a test case.  Unknown service names raise
    exceptions.InvalidServiceTag at decoration time; at call time the test
    is skipped if any required service is unavailable per CONF.
    """
    def decorator(f):
        # Valid service tags; must stay in sync with get_service_list().
        services = ['compute', 'image', 'baremetal', 'volume', 'orchestration',
                    'network', 'identity', 'object_storage', 'dashboard',
                    'telemetry', 'data_processing', 'database']
        for service in args:
            if service not in services:
                raise exceptions.InvalidServiceTag('%s is not a valid '
                                                   'service' % service)
        attr(type=list(args))(f)

        @functools.wraps(f)
        def wrapper(self, *func_args, **func_kwargs):
            service_list = get_service_list()

            for service in args:
                if not service_list[service]:
                    msg = 'Skipped because the %s service is not available' % (
                        service)
                    raise testtools.TestCase.skipException(msg)
            return f(self, *func_args, **func_kwargs)
        return wrapper
    return decorator
|
||||
|
||||
|
||||
def stresstest(*args, **kwargs):
    """Add stress test decorator

    For all functions with this decorator a attr stress will be
    set automatically.

    @param class_setup_per: allowed values are application, process, action
           ``application``: once in the stress job lifetime
           ``process``: once in the worker process lifetime
           ``action``: on each action
    @param allow_inheritance: allows inheritance of this attribute
    """
    def decorator(f):
        # Fall back to the documented defaults when the keyword is absent.
        setattr(f, "st_class_setup_per",
                kwargs.get('class_setup_per', 'process'))
        setattr(f, "st_allow_inheritance",
                kwargs.get('allow_inheritance', False))
        attr(type='stress')(f)
        return f
    return decorator
|
||||
|
||||
|
||||
def requires_ext(*args, **kwargs):
    """A decorator to skip tests if an extension is not enabled

    @param extension: extension name to check
    @param service: service whose extension list is consulted
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            # Checked at call time so config is fully loaded.
            if not is_extension_enabled(kwargs['extension'],
                                        kwargs['service']):
                msg = "Skipped because %s extension: %s is not enabled" % (
                    kwargs['service'], kwargs['extension'])
                raise testtools.TestCase.skipException(msg)
            return func(*func_args, **func_kwargs)
        return wrapper
    return decorator
|
||||
|
||||
|
||||
def is_extension_enabled(extension_name, service):
    """Check the configured extension list of a service for a given name.

    :param extension_name: extension to look for
    :param service: one of 'compute', 'volume', 'network', 'object'
    :returns: True when the extension is enabled (or the list is the
        single sentinel 'all'), False when the list is empty or does not
        contain the extension
    """
    config_dict = {
        'compute': CONF.compute_feature_enabled.api_extensions,
        'volume': CONF.volume_feature_enabled.api_extensions,
        'network': CONF.network_feature_enabled.api_extensions,
        'object': CONF.object_storage_feature_enabled.discoverable_apis,
    }
    enabled_extensions = config_dict[service]
    # An empty list means nothing is enabled for this service.
    if not enabled_extensions:
        return False
    # The sentinel 'all' in first position enables every extension.
    if enabled_extensions[0] == 'all':
        return True
    return extension_name in enabled_extensions
|
||||
|
||||
|
||||
# Test classes seen in setUp whose tearDownClass has not discarded them yet;
# inspected at interpreter exit by validate_tearDownClass() below.
at_exit_set = set()
|
||||
|
||||
|
||||
def validate_tearDownClass():
    """Log an error for any test class that never reached the base
    tearDownClass (i.e. it is still present in at_exit_set at exit).
    """
    if not at_exit_set:
        return
    message = ("tearDownClass does not call the super's "
               "tearDownClass in these classes: \n"
               + str(at_exit_set))
    LOG.error(message)
|
||||
|
||||
|
||||
# Run the tearDownClass sanity check once, when the test process exits.
atexit.register(validate_tearDownClass)
|
||||
|
||||
|
||||
class BaseTestCase(testtools.testcase.WithAttributes,
                   testtools.TestCase):
    """The test base class defines Tempest framework for class level fixtures.

    `setUpClass` and `tearDownClass` are defined here and cannot be overwritten
    by subclasses (enforced via hacking rule T105).

    Set-up is split in a series of steps (setup stages), which can be
    overwritten by test classes. Set-up stages are:
    - skip_checks
    - setup_credentials
    - setup_clients
    - resource_setup

    Tear-down is also split in a series of steps (teardown stages), which are
    stacked for execution only if the corresponding setup stage had been
    reached during the setup phase. Tear-down stages are:
    - clear_isolated_creds (defined in the base test class)
    - resource_cleanup
    """

    # Flipped to True in setUpClass; setUp refuses to run without it, which
    # catches subclasses that override setUpClass without calling super.
    setUpClassCalled = False
    _service = None

    # NOTE(andreaf) credentials holds a list of the credentials to be allocated
    # at class setup time. Credential types can be 'primary', 'alt', 'admin' or
    # a list of roles - the first element of the list being a label, and the
    # rest the actual roles
    credentials = []
    # Resources required to validate a server using ssh
    validation_resources = {}
    network_resources = {}

    # NOTE(sdague): log_format is defined inline here instead of using the oslo
    # default because going through the config path recouples config to the
    # stress tests too early, and depending on testr order will fail unit tests
    log_format = ('%(asctime)s %(process)d %(levelname)-8s '
                  '[%(name)s] %(message)s')

    @classmethod
    def setUpClass(cls):
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'setUpClass'):
            super(BaseTestCase, cls).setUpClass()
        cls.setUpClassCalled = True
        # Stack of (name, callable) to be invoked in reverse order at teardown
        cls.teardowns = []
        # All the configuration checks that may generate a skip
        cls.skip_checks()
        try:
            # Allocation of all required credentials and client managers
            cls.teardowns.append(('credentials', cls.clear_isolated_creds))
            cls.setup_credentials()
            # Shortcuts to clients
            cls.setup_clients()
            # Additional class-wide test resources
            cls.teardowns.append(('resources', cls.resource_cleanup))
            cls.resource_setup()
        except Exception:
            # A failure in any setup stage triggers a full teardown of the
            # stages already stacked, then re-raises the original exception.
            etype, value, trace = sys.exc_info()
            LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
                etype, cls.__name__))
            cls.tearDownClass()
            try:
                six.reraise(etype, value, trace)
            finally:
                del trace  # to avoid circular refs

    @classmethod
    def tearDownClass(cls):
        at_exit_set.discard(cls)
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
            super(BaseTestCase, cls).tearDownClass()
        # Save any existing exception, we always want to re-raise the original
        # exception only
        etype, value, trace = sys.exc_info()
        # If there was no exception during setup we shall re-raise the first
        # exception in teardown
        re_raise = (etype is None)
        while cls.teardowns:
            name, teardown = cls.teardowns.pop()
            # Catch any exception in tearDown so we can re-raise the original
            # exception at the end
            try:
                teardown()
            except Exception as te:
                sys_exec_info = sys.exc_info()
                tetype = sys_exec_info[0]
                # TODO(andreaf): Till we have the ability to cleanup only
                # resources that were successfully setup in resource_cleanup,
                # log AttributeError as info instead of exception.
                if tetype is AttributeError and name == 'resources':
                    LOG.info("tearDownClass of %s failed: %s" % (name, te))
                else:
                    LOG.exception("teardown of %s failed: %s" % (name, te))
                if not etype:
                    etype, value, trace = sys_exec_info
        # If exceptions were raised during teardown, and not before, re-raise
        # the first one
        if re_raise and etype is not None:
            try:
                six.reraise(etype, value, trace)
            finally:
                del trace  # to avoid circular refs

    @classmethod
    def skip_checks(cls):
        """Class level skip checks. Subclasses verify in here all
        conditions that might prevent the execution of the entire test class.
        Checks implemented here may not make use of API calls, and should rely
        on configuration alone.
        In general skip checks that require an API call are discouraged.
        If one is really needed it may be implemented either in the
        resource_setup or at test level.
        """
        if 'admin' in cls.credentials and not credentials.is_admin_available():
            msg = "Missing Identity Admin API credentials in configuration."
            raise cls.skipException(msg)
        if 'alt' in cls.credentials and not credentials.is_alt_available():
            msg = "Missing a 2nd set of API credentials in configuration."
            raise cls.skipException(msg)
        if hasattr(cls, 'identity_version'):
            if cls.identity_version == 'v2':
                if not CONF.identity_feature_enabled.api_v2:
                    raise cls.skipException("Identity api v2 is not enabled")
            elif cls.identity_version == 'v3':
                if not CONF.identity_feature_enabled.api_v3:
                    raise cls.skipException("Identity api v3 is not enabled")

    @classmethod
    def setup_credentials(cls):
        """Allocate credentials and the client managers from them.

        A test class that requires network resources must override
        setup_credentials and define the required resources before super
        is invoked.
        """
        for credentials_type in cls.credentials:
            # This may raise an exception in case credentials are not available
            # In that case we want to let the exception through and the test
            # fail accordingly
            if isinstance(credentials_type, six.string_types):
                manager = cls.get_client_manager(
                    credential_type=credentials_type)
                setattr(cls, 'os_%s' % credentials_type, manager)
                # Setup some common aliases
                # TODO(andreaf) The aliases below are a temporary hack
                # to avoid changing too much code in one patch. They should
                # be removed eventually
                if credentials_type == 'primary':
                    cls.os = cls.manager = cls.os_primary
                if credentials_type == 'admin':
                    cls.os_adm = cls.admin_manager = cls.os_admin
                if credentials_type == 'alt':
                    cls.alt_manager = cls.os_alt
            elif isinstance(credentials_type, list):
                # A list entry is [label, role, role, ...]: allocate creds
                # with those roles and expose them as os_roles_<label>.
                manager = cls.get_client_manager(roles=credentials_type[1:],
                                                 force_new=True)
                setattr(cls, 'os_roles_%s' % credentials_type[0], manager)

    @classmethod
    def setup_clients(cls):
        """Create links to the clients into the test object."""
        # TODO(andreaf) There is a fair amount of code that could me moved from
        # base / test classes in here. Ideally tests should be able to only
        # specify which client is `client` and nothing else.
        pass

    @classmethod
    def resource_setup(cls):
        """Class level resource setup for test cases.
        """
        # Validation resources (keypair/sec-group/floating-ip) need a client
        # manager; 'os' is set by setup_credentials for primary credentials.
        if hasattr(cls, "os"):
            cls.validation_resources = vresources.create_validation_resources(
                cls.os, cls.validation_resources)
        else:
            LOG.warn("Client manager not found, validation resources not"
                     " created")

    @classmethod
    def resource_cleanup(cls):
        """Class level resource cleanup for test cases.

        Resource cleanup must be able to handle the case of partially setup
        resources, in case a failure during `resource_setup` should happen.
        """
        if cls.validation_resources:
            if hasattr(cls, "os"):
                vresources.clear_validation_resources(cls.os,
                                                      cls.validation_resources)
                cls.validation_resources = {}
            else:
                LOG.warn("Client manager not found, validation resources not"
                         " deleted")

    def setUp(self):
        super(BaseTestCase, self).setUp()
        if not self.setUpClassCalled:
            raise RuntimeError("setUpClass does not calls the super's"
                               "setUpClass in the "
                               + self.__class__.__name__)
        at_exit_set.add(self.__class__)
        # Optional per-test timeout, driven by the OS_TEST_TIMEOUT env var;
        # non-integer values are treated as "no timeout".
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

        # stdout/stderr capture is opt-in; log capture is opt-out.
        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
                os.environ.get('OS_LOG_CAPTURE') != '0'):
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=self.log_format,
                                                   level=None))

    @property
    def credentials_provider(self):
        # Instance-level convenience accessor for the class-wide provider.
        return self._get_credentials_provider()

    @classmethod
    def _get_credentials_provider(cls):
        """Returns a credentials provider

        If no credential provider exists yet creates one.
        It uses cls.identity_version if defined, or the configuration value.
        """
        # Re-create the provider when missing or when it belongs to a
        # different test class (provider name must match cls.__name__).
        if (not hasattr(cls, '_creds_provider') or not cls._creds_provider or
                not cls._creds_provider.name == cls.__name__):
            force_tenant_isolation = getattr(cls, 'force_tenant_isolation',
                                             False)
            identity_version = getattr(cls, 'identity_version', None)
            identity_version = identity_version or CONF.identity.auth_version

            cls._creds_provider = credentials.get_isolated_credentials(
                name=cls.__name__, network_resources=cls.network_resources,
                force_tenant_isolation=force_tenant_isolation,
                identity_version=identity_version)
        return cls._creds_provider

    @classmethod
    def get_client_manager(cls, credential_type=None, roles=None,
                           force_new=None):
        """Returns an OpenStack client manager

        Returns an OpenStack client manager based on either credential_type
        or a list of roles. If neither is specified, it defaults to
        credential_type 'primary'
        :param credential_type: string - primary, alt or admin
        :param roles: list of roles

        :returns the created client manager
        :raises skipException: if the requested credentials are not available
        """
        # credential_type and roles are mutually exclusive.
        if all([roles, credential_type]):
            msg = "Cannot get credentials by type and roles at the same time"
            raise ValueError(msg)
        if not any([roles, credential_type]):
            credential_type = 'primary'
        cred_provider = cls._get_credentials_provider()
        if roles:
            for role in roles:
                if not cred_provider.is_role_available(role):
                    skip_msg = (
                        "%s skipped because the configured credential provider"
                        " is not able to provide credentials with the %s role "
                        "assigned." % (cls.__name__, role))
                    raise cls.skipException(skip_msg)
            params = dict(roles=roles)
            if force_new is not None:
                params.update(force_new=force_new)
            creds = cred_provider.get_creds_by_roles(**params)
        else:
            # Dispatch to get_primary_creds / get_alt_creds / get_admin_creds
            # based on the requested type.
            credentials_method = 'get_%s_creds' % credential_type
            if hasattr(cred_provider, credentials_method):
                creds = getattr(cred_provider, credentials_method)()
            else:
                raise exceptions.InvalidCredentials(
                    "Invalid credentials type %s" % credential_type)
        return clients.Manager(credentials=creds, service=cls._service)

    @classmethod
    def clear_isolated_creds(cls):
        """
        Clears isolated creds if set
        """
        if hasattr(cls, '_creds_provider'):
            cls._creds_provider.clear_isolated_creds()

    @classmethod
    def set_validation_resources(cls, keypair=None, floating_ip=None,
                                 security_group=None,
                                 security_group_rules=None):
        """Specify which ssh server validation resources should be created.

        Each of the argument must be set to either None, True or False, with
        None - use default from config (security groups and security group
               rules get created when set to None)
        False - Do not create the validation resource
        True - create the validation resource

        @param keypair
        @param security_group
        @param security_group_rules
        @param floating_ip
        """
        # No-op when ssh validation is disabled in the configuration.
        if not CONF.validation.run_validation:
            return
        if keypair is None:
            if CONF.validation.auth_method.lower() == "keypair":
                keypair = True
            else:
                keypair = False
        if floating_ip is None:
            if CONF.validation.connect_method.lower() == "floating":
                floating_ip = True
            else:
                floating_ip = False
        if security_group is None:
            security_group = True
        if security_group_rules is None:
            security_group_rules = True
        # Only the first caller wins, so the leaf test class setting sticks.
        if not cls.validation_resources:
            cls.validation_resources = {
                'keypair': keypair,
                'security_group': security_group,
                'security_group_rules': security_group_rules,
                'floating_ip': floating_ip}

    @classmethod
    def set_network_resources(cls, network=False, router=False, subnet=False,
                              dhcp=False):
        """Specify which network resources should be created

        @param network
        @param router
        @param subnet
        @param dhcp
        """
        # network resources should be set only once from callers
        # in order to ensure that even if it's called multiple times in
        # a chain of overloaded methods, the attribute is set only
        # in the leaf class
        if not cls.network_resources:
            cls.network_resources = {
                'network': network,
                'router': router,
                'subnet': subnet,
                'dhcp': dhcp}

    @classmethod
    def get_tenant_network(cls):
        """Get the network to be used in testing

        :return: network dict including 'id' and 'name'
        """
        # Make sure isolated_creds exists and get a network client
        networks_client = cls.get_client_manager().networks_client
        cred_provider = cls._get_credentials_provider()
        # In case of nova network, isolated tenants are not able to list the
        # network configured in fixed_network_name, even if they can use it
        # for their servers, so using an admin network client to validate
        # the network name
        if (not CONF.service_available.neutron and
                credentials.is_admin_available()):
            admin_creds = cred_provider.get_admin_creds()
            networks_client = clients.Manager(admin_creds).networks_client
        return fixed_network.get_tenant_network(cred_provider,
                                                networks_client)

    def assertEmpty(self, list, msg=None):
        # Assert the given sequence has no elements.
        self.assertTrue(len(list) == 0, msg)

    def assertNotEmpty(self, list, msg=None):
        # Assert the given sequence has at least one element.
        self.assertTrue(len(list) > 0, msg)
|
||||
|
||||
|
||||
class NegativeAutoTest(BaseTestCase):
    # Base class for auto-generated negative API tests: each test executes
    # an API description (url, method, schema) with invalid resources or
    # invalid payloads and expects a 4xx client error.

    # Shared registry of known-valid resource ids, keyed by resource name;
    # populated via set_resource and consumed by get_resource.
    _resources = {}

    @classmethod
    def setUpClass(cls):
        super(NegativeAutoTest, cls).setUpClass()
        os = cls.get_client_manager(credential_type='primary')
        cls.client = os.negative_client

    @staticmethod
    def load_tests(*args):
        """
        Wrapper for testscenarios to set the mandatory scenarios variable
        only in case a real test loader is in place. Will be automatically
        called in case the variable "load_tests" is set.
        """
        # The load_tests protocol has two call signatures; detect which one
        # we got by probing the first argument for a loader attribute.
        if getattr(args[0], 'suiteClass', None) is not None:
            loader, standard_tests, pattern = args
        else:
            standard_tests, module, loader = args
        for test in testtools.iterate_tests(standard_tests):
            schema = getattr(test, '_schema', None)
            if schema is not None:
                setattr(test, 'scenarios',
                        NegativeAutoTest.generate_scenario(schema))
        return testscenarios.load_tests_apply_scenarios(*args)

    @staticmethod
    def generate_scenario(description):
        """
        Generates the test scenario list for a given description.

        :param description: A file or dictionary with the following entries:
            name (required) name for the api
            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources: (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.
        """
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        # One "invalid resource" scenario per resource: a random uuid is
        # substituted for the real id.
        for resource in resources:
            if isinstance(resource, dict):
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s" % resource)
            scn_name = "inv_res_%s" % (resource)
            scenario_list.append((scn_name, {"resource": (resource,
                                                          str(uuid.uuid4())),
                                             "expected_result": expected_result
                                             }))
        # Plus one scenario per invalid payload derived from the schema.
        if schema is not None:
            for scenario in generator.generate_scenarios(schema):
                scenario_list.append((scenario['_negtest_name'],
                                      scenario))
        LOG.debug(scenario_list)
        return scenario_list

    def execute(self, description):
        """
        Execute a http call on an api that are expected to
        result in client errors. First it uses invalid resources that are part
        of the url, and then invalid data for queries and http request bodies.

        :param description: A json file or dictionary with the following
        entries:
            name (required) name for the api
            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources: (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.

        """
        LOG.info("Executing %s" % description["name"])
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        schema = description.get("json-schema", None)
        method = description["http-method"]
        url = description["url"]
        expected_result = None
        if "default_result_code" in description:
            expected_result = description["default_result_code"]

        resources = [self.get_resource(r) for
                     r in description.get("resources", [])]

        # 'resource' / '_negtest_name' are injected per-scenario by
        # generate_scenario via the testscenarios load_tests hook.
        if hasattr(self, "resource"):
            # Note(mkoderer): The resources list already contains an invalid
            # entry (see get_resource).
            # We just send a valid json-schema with it
            valid_schema = None
            if schema:
                valid_schema = \
                    valid.ValidTestGenerator().generate_valid(schema)
            new_url, body = self._http_arguments(valid_schema, url, method)
        elif hasattr(self, "_negtest_name"):
            schema_under_test = \
                valid.ValidTestGenerator().generate_valid(schema)
            local_expected_result = \
                generator.generate_payload(self, schema_under_test)
            if local_expected_result is not None:
                expected_result = local_expected_result
            new_url, body = \
                self._http_arguments(schema_under_test, url, method)
        else:
            raise Exception("testscenarios are not active. Please make sure "
                            "that your test runner supports the load_tests "
                            "mechanism")

        if "admin_client" in description and description["admin_client"]:
            if not credentials.is_admin_available():
                msg = ("Missing Identity Admin API credentials in"
                       "configuration.")
                raise self.skipException(msg)
            creds = self.credentials_provider.get_admin_creds()
            os_adm = clients.Manager(credentials=creds)
            client = os_adm.negative_client
        else:
            client = self.client
        resp, resp_body = client.send_request(method, new_url,
                                              resources, body=body)
        self._check_negative_response(expected_result, resp.status, resp_body)

    def _http_arguments(self, json_dict, url, method):
        # Turn the generated payload into either a query string (for verbs
        # without a request body here) or a JSON body; returns (url, body).
        LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
        if not json_dict:
            return url, None
        elif method in ["GET", "HEAD", "PUT", "DELETE"]:
            return "%s?%s" % (url, urllib.urlencode(json_dict)), None
        else:
            return url, json.dumps(json_dict)

    def _check_negative_response(self, expected_result, result, body):
        # Any 4xx except 413 counts as the expected client error; when an
        # explicit expected code is known it must match exactly.
        self.assertTrue(result >= 400 and result < 500 and result != 413,
                        "Expected client error, got %s:%s" %
                        (result, body))
        self.assertTrue(expected_result is None or expected_result == result,
                        "Expected %s, got %s:%s" %
                        (expected_result, result, body))

    @classmethod
    def set_resource(cls, name, resource):
        """
        This function can be used in setUpClass context to register a resource
        for a test.

        :param name: The name of the kind of resource such as "flavor", "role",
            etc.
        :resource: The id of the resource
        """
        cls._resources[name] = resource

    def get_resource(self, name):
        """
        Return a valid uuid for a type of resource. If a real resource is
        needed as part of a url then this method should return one. Otherwise
        it can return None.

        :param name: The name of the kind of resource such as "flavor", "role",
            etc.
        """
        if isinstance(name, dict):
            name = name['name']
        # For the scenario's targeted resource, return the injected invalid
        # value instead of the registered valid one.
        if hasattr(self, "resource") and self.resource[0] == name:
            LOG.debug("Return invalid resource (%s) value: %s" %
                      (self.resource[0], self.resource[1]))
            return self.resource[1]
        if name in self._resources:
            return self._resources[name]
        return None
|
||||
|
||||
|
||||
def SimpleNegativeAutoTest(klass):
    """
    Class decorator that registers a generated negative test method whose
    name is derived from the class name ('JSON'/'Test' dropped, CamelCase
    converted to snake_case).
    """
    @attr(type=['negative'])
    def generic_test(self):
        if hasattr(self, '_schema'):
            self.execute(self._schema)

    stripped = klass.__name__.replace('JSON', '').replace('Test', '')
    # NOTE(mkoderer): replaces uppercase chars inside the class name with '_'
    snake_name = re.sub('(?<!^)(?=[A-Z])', '_', stripped).lower()
    setattr(klass, 'test_%s' % snake_name, generic_test)
    return klass
|
||||
|
||||
|
||||
def call_until_true(func, duration, sleep_for):
    """
    Call the given function until it returns True (and return True) or
    until the specified duration (in seconds) elapses (and return
    False).

    :param func: A zero argument callable that returns True on success.
    :param duration: The number of seconds for which to attempt a
        successful call of the function.
    :param sleep_for: The number of seconds to sleep after an unsuccessful
                      invocation of the function.
    """
    deadline = time.time() + duration
    while time.time() < deadline:
        if func():
            return True
        # Back off before polling again.
        time.sleep(sleep_for)
    return False
|
|
@ -0,0 +1,73 @@
|
|||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib.common.utils import data_utils
|
||||
|
||||
from neutron_lbaas.tests.tempest.v1.api import base
|
||||
from neutron_lbaas.tests.tempest.lib import test
|
||||
|
||||
|
||||
class LBaaSAgentSchedulerTestJSON(base.BaseAdminNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client
    for Neutron:

        List pools the given LBaaS agent is hosting.
        Show a LBaaS agent hosting the given pool.

    v2.0 of the Neutron API is assumed. It is also assumed that the following
    options are defined in the [network-feature-enabled] section of
    etc/tempest.conf:

        api_extensions
    """

    @classmethod
    def resource_setup(cls):
        super(LBaaSAgentSchedulerTestJSON, cls).resource_setup()
        # The whole class is pointless without the scheduler extension.
        if not test.is_extension_enabled('lbaas_agent_scheduler', 'network'):
            msg = "LBaaS Agent Scheduler Extension not enabled."
            raise cls.skipException(msg)
        # One network/subnet/pool shared by both tests in this class.
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        pool_name = data_utils.rand_name('pool-')
        cls.pool = cls.create_pool(pool_name, "ROUND_ROBIN",
                                   "HTTP", cls.subnet)

    @test.attr(type='smoke')
    @test.idempotent_id('e5ea8b15-4f44-4350-963c-e0fcb533ee79')
    def test_list_pools_on_lbaas_agent(self):
        # Some load balancer agent must report it hosts the pool created
        # in resource_setup.
        found = False
        body = self.admin_client.list_agents(
            agent_type="Loadbalancer agent")
        agents = body['agents']
        for a in agents:
            msg = 'Load Balancer agent expected'
            self.assertEqual(a['agent_type'], 'Loadbalancer agent', msg)
            body = (
                self.admin_client.list_pools_hosted_by_one_lbaas_agent(
                    a['id']))
            pools = body['pools']
            if self.pool['id'] in [p['id'] for p in pools]:
                found = True
        msg = 'Unable to find Load Balancer agent hosting pool'
        self.assertTrue(found, msg)

    @test.attr(type='smoke')
    @test.idempotent_id('e2745593-fd79-4b98-a262-575fd7865796')
    def test_show_lbaas_agent_hosting_pool(self):
        # The reverse lookup: the agent hosting our pool must be of the
        # load balancer agent type.
        body = self.admin_client.show_lbaas_agent_hosting_pool(
            self.pool['id'])
        self.assertEqual('Loadbalancer agent', body['agent']['agent_type'])
|
|
@ -0,0 +1,115 @@
|
|||
# Copyright 2014 Mirantis.inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib.common.utils import data_utils
|
||||
|
||||
from neutron_lbaas.tests.tempest.v1.api import base
|
||||
from neutron_lbaas.tests.tempest.lib import test
|
||||
|
||||
|
||||
class LoadBalancerAdminTestJSON(base.BaseAdminNetworkTest):
|
||||
|
||||
"""
|
||||
Test admin actions for load balancer.
|
||||
|
||||
Create VIP for another tenant
|
||||
Create health monitor for another tenant
|
||||
"""
|
||||
|
||||
    @classmethod
    def resource_setup(cls):
        super(LoadBalancerAdminTestJSON, cls).resource_setup()
        # Skip the whole class when the lbaas (v1) extension is off.
        if not test.is_extension_enabled('lbaas', 'network'):
            msg = "lbaas extension not enabled."
            raise cls.skipException(msg)
        # Force isolated credentials so the admin tests operate against a
        # dedicated primary tenant.
        cls.force_tenant_isolation = True
        manager = cls.get_client_manager()
        cls.client = manager.network_client
        # Tenant the admin client will create resources on behalf of.
        cls.tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.pool = cls.create_pool(data_utils.rand_name('pool-'),
                                   "ROUND_ROBIN", "HTTP", cls.subnet)
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('6b0a20d8-4fcd-455e-b54f-ec4db5199518')
    def test_create_vip_as_admin_for_another_tenant(self):
        # Admin creates a pool and a VIP owned by the primary tenant, then
        # verifies the tenant's own client can see the VIP.
        name = data_utils.rand_name('vip-')
        body = self.admin_client.create_pool(
            name=data_utils.rand_name('pool-'),
            lb_method="ROUND_ROBIN",
            protocol="HTTP",
            subnet_id=self.subnet['id'],
            tenant_id=self.tenant_id)
        pool = body['pool']
        self.addCleanup(self.admin_client.delete_pool, pool['id'])
        body = self.admin_client.create_vip(name=name,
                                            protocol="HTTP",
                                            protocol_port=80,
                                            subnet_id=self.subnet['id'],
                                            pool_id=pool['id'],
                                            tenant_id=self.tenant_id)
        vip = body['vip']
        self.addCleanup(self.admin_client.delete_vip, vip['id'])
        self.assertIsNotNone(vip['id'])
        # Ownership must be the target tenant, not the admin's own tenant.
        self.assertEqual(self.tenant_id, vip['tenant_id'])
        body = self.client.show_vip(vip['id'])
        show_vip = body['vip']
        self.assertEqual(vip['id'], show_vip['id'])
        self.assertEqual(vip['name'], show_vip['name'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('74552cfc-ab78-4fb6-825b-f67bca379921')
|
||||
def test_create_health_monitor_as_admin_for_another_tenant(self):
|
||||
body = (
|
||||
self.admin_client.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
type="TCP",
|
||||
timeout=1,
|
||||
tenant_id=self.tenant_id))
|
||||
health_monitor = body['health_monitor']
|
||||
self.addCleanup(self.admin_client.delete_health_monitor,
|
||||
health_monitor['id'])
|
||||
self.assertIsNotNone(health_monitor['id'])
|
||||
self.assertEqual(self.tenant_id, health_monitor['tenant_id'])
|
||||
body = self.client.show_health_monitor(health_monitor['id'])
|
||||
show_health_monitor = body['health_monitor']
|
||||
self.assertEqual(health_monitor['id'], show_health_monitor['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('266a192d-3c22-46c4-a8fb-802450301e82')
|
||||
def test_create_pool_from_admin_user_other_tenant(self):
|
||||
body = self.admin_client.create_pool(
|
||||
name=data_utils.rand_name('pool-'),
|
||||
lb_method="ROUND_ROBIN",
|
||||
protocol="HTTP",
|
||||
subnet_id=self.subnet['id'],
|
||||
tenant_id=self.tenant_id)
|
||||
pool = body['pool']
|
||||
self.addCleanup(self.admin_client.delete_pool, pool['id'])
|
||||
self.assertIsNotNone(pool['id'])
|
||||
self.assertEqual(self.tenant_id, pool['tenant_id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('158bb272-b9ed-4cfc-803c-661dac46f783')
|
||||
def test_create_member_from_admin_user_other_tenant(self):
|
||||
body = self.admin_client.create_member(address="10.0.9.47",
|
||||
protocol_port=80,
|
||||
pool_id=self.pool['id'],
|
||||
tenant_id=self.tenant_id)
|
||||
member = body['member']
|
||||
self.addCleanup(self.admin_client.delete_member, member['id'])
|
||||
self.assertIsNotNone(member['id'])
|
||||
self.assertEqual(self.tenant_id, member['tenant_id'])
|
|
@ -0,0 +1,466 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
from oslo_log import log as logging
|
||||
from tempest_lib.common.utils import data_utils
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron_lbaas.tests.tempest.v1.api import clients
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import exceptions
|
||||
import neutron_lbaas.tests.tempest.lib.test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseNetworkTest(neutron_lbaas.tests.tempest.lib.test.BaseTestCase):
|
||||
|
||||
"""
|
||||
Base class for the Neutron tests that use the Tempest Neutron REST client
|
||||
|
||||
Per the Neutron API Guide, API v1.x was removed from the source code tree
|
||||
(docs.openstack.org/api/openstack-network/2.0/content/Overview-d1e71.html)
|
||||
Therefore, v2.x of the Neutron API is assumed. It is also assumed that the
|
||||
following options are defined in the [network] section of etc/tempest.conf:
|
||||
|
||||
tenant_network_cidr with a block of cidr's from which smaller blocks
|
||||
can be allocated for tenant networks
|
||||
|
||||
tenant_network_mask_bits with the mask bits to be used to partition the
|
||||
block defined by tenant-network_cidr
|
||||
|
||||
Finally, it is assumed that the following option is defined in the
|
||||
[service_available] section of etc/tempest.conf
|
||||
|
||||
neutron as True
|
||||
"""
|
||||
|
||||
force_tenant_isolation = False
|
||||
|
||||
# Default to ipv4.
|
||||
_ip_version = 4
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
# Create no network resources for these test.
|
||||
cls.set_network_resources()
|
||||
super(BaseNetworkTest, cls).resource_setup()
|
||||
if not CONF.service_available.neutron:
|
||||
raise cls.skipException("Neutron support is required")
|
||||
if cls._ip_version == 6 and not CONF.network_feature_enabled.ipv6:
|
||||
raise cls.skipException("IPv6 Tests are disabled.")
|
||||
|
||||
os = cls.get_client_manager()
|
||||
|
||||
cls.network_cfg = CONF.network
|
||||
cls.client = os.network_client
|
||||
cls.networks = []
|
||||
cls.shared_networks = []
|
||||
cls.subnets = []
|
||||
cls.ports = []
|
||||
cls.routers = []
|
||||
cls.pools = []
|
||||
cls.vips = []
|
||||
cls.members = []
|
||||
cls.health_monitors = []
|
||||
cls.vpnservices = []
|
||||
cls.ikepolicies = []
|
||||
cls.floating_ips = []
|
||||
cls.metering_labels = []
|
||||
cls.metering_label_rules = []
|
||||
cls.fw_rules = []
|
||||
cls.fw_policies = []
|
||||
cls.ipsecpolicies = []
|
||||
cls.ethertype = "IPv" + str(cls._ip_version)
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
if CONF.service_available.neutron:
|
||||
# Clean up ipsec policies
|
||||
for ipsecpolicy in cls.ipsecpolicies:
|
||||
cls._try_delete_resource(cls.client.delete_ipsecpolicy,
|
||||
ipsecpolicy['id'])
|
||||
# Clean up firewall policies
|
||||
for fw_policy in cls.fw_policies:
|
||||
cls._try_delete_resource(cls.client.delete_firewall_policy,
|
||||
fw_policy['id'])
|
||||
# Clean up firewall rules
|
||||
for fw_rule in cls.fw_rules:
|
||||
cls._try_delete_resource(cls.client.delete_firewall_rule,
|
||||
fw_rule['id'])
|
||||
# Clean up ike policies
|
||||
for ikepolicy in cls.ikepolicies:
|
||||
cls._try_delete_resource(cls.client.delete_ikepolicy,
|
||||
ikepolicy['id'])
|
||||
# Clean up vpn services
|
||||
for vpnservice in cls.vpnservices:
|
||||
cls._try_delete_resource(cls.client.delete_vpnservice,
|
||||
vpnservice['id'])
|
||||
# Clean up floating IPs
|
||||
for floating_ip in cls.floating_ips:
|
||||
cls._try_delete_resource(cls.client.delete_floatingip,
|
||||
floating_ip['id'])
|
||||
# Clean up routers
|
||||
for router in cls.routers:
|
||||
cls._try_delete_resource(cls.delete_router,
|
||||
router)
|
||||
|
||||
# Clean up health monitors
|
||||
for health_monitor in cls.health_monitors:
|
||||
cls._try_delete_resource(cls.client.delete_health_monitor,
|
||||
health_monitor['id'])
|
||||
# Clean up members
|
||||
for member in cls.members:
|
||||
cls._try_delete_resource(cls.client.delete_member,
|
||||
member['id'])
|
||||
# Clean up vips
|
||||
for vip in cls.vips:
|
||||
cls._try_delete_resource(cls.client.delete_vip,
|
||||
vip['id'])
|
||||
# Clean up pools
|
||||
for pool in cls.pools:
|
||||
cls._try_delete_resource(cls.client.delete_pool,
|
||||
pool['id'])
|
||||
# Clean up metering label rules
|
||||
for metering_label_rule in cls.metering_label_rules:
|
||||
cls._try_delete_resource(
|
||||
cls.admin_client.delete_metering_label_rule,
|
||||
metering_label_rule['id'])
|
||||
# Clean up metering labels
|
||||
for metering_label in cls.metering_labels:
|
||||
cls._try_delete_resource(
|
||||
cls.admin_client.delete_metering_label,
|
||||
metering_label['id'])
|
||||
# Clean up ports
|
||||
for port in cls.ports:
|
||||
cls._try_delete_resource(cls.client.delete_port,
|
||||
port['id'])
|
||||
# Clean up subnets
|
||||
for subnet in cls.subnets:
|
||||
cls._try_delete_resource(cls.client.delete_subnet,
|
||||
subnet['id'])
|
||||
# Clean up networks
|
||||
for network in cls.networks:
|
||||
cls._try_delete_resource(cls.client.delete_network,
|
||||
network['id'])
|
||||
|
||||
# Clean up shared networks
|
||||
for network in cls.shared_networks:
|
||||
cls._try_delete_resource(cls.admin_client.delete_network,
|
||||
network['id'])
|
||||
|
||||
cls.clear_isolated_creds()
|
||||
super(BaseNetworkTest, cls).resource_cleanup()
|
||||
|
||||
@classmethod
|
||||
def _try_delete_resource(self, delete_callable, *args, **kwargs):
|
||||
"""Cleanup resources in case of test-failure
|
||||
|
||||
Some resources are explicitly deleted by the test.
|
||||
If the test failed to delete a resource, this method will execute
|
||||
the appropriate delete methods. Otherwise, the method ignores NotFound
|
||||
exceptions thrown for resources that were correctly deleted by the
|
||||
test.
|
||||
|
||||
:param delete_callable: delete method
|
||||
:param args: arguments for delete method
|
||||
:param kwargs: keyword arguments for delete method
|
||||
"""
|
||||
try:
|
||||
delete_callable(*args, **kwargs)
|
||||
# if resource is not found, this means it was deleted in the test
|
||||
except lib_exc.NotFound:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def create_network(cls, network_name=None):
|
||||
"""Wrapper utility that returns a test network."""
|
||||
network_name = network_name or data_utils.rand_name('test-network-')
|
||||
|
||||
body = cls.client.create_network(name=network_name)
|
||||
network = body['network']
|
||||
cls.networks.append(network)
|
||||
return network
|
||||
|
||||
@classmethod
|
||||
def create_shared_network(cls, network_name=None):
|
||||
network_name = network_name or data_utils.rand_name('sharednetwork-')
|
||||
post_body = {'name': network_name, 'shared': True}
|
||||
body = cls.admin_client.create_network(**post_body)
|
||||
network = body['network']
|
||||
cls.shared_networks.append(network)
|
||||
return network
|
||||
|
||||
@classmethod
|
||||
def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None,
|
||||
ip_version=None, client=None, **kwargs):
|
||||
"""Wrapper utility that returns a test subnet."""
|
||||
|
||||
# allow tests to use admin client
|
||||
if not client:
|
||||
client = cls.client
|
||||
|
||||
# The cidr and mask_bits depend on the ip version.
|
||||
ip_version = ip_version if ip_version is not None else cls._ip_version
|
||||
gateway_not_set = gateway == ''
|
||||
if ip_version == 4:
|
||||
cidr = cidr or netaddr.IPNetwork(CONF.network.tenant_network_cidr)
|
||||
mask_bits = mask_bits or CONF.network.tenant_network_mask_bits
|
||||
elif ip_version == 6:
|
||||
cidr = (
|
||||
cidr or netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr))
|
||||
mask_bits = mask_bits or CONF.network.tenant_network_v6_mask_bits
|
||||
# Find a cidr that is not in use yet and create a subnet with it
|
||||
for subnet_cidr in cidr.subnet(mask_bits):
|
||||
if gateway_not_set:
|
||||
gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
|
||||
else:
|
||||
gateway_ip = gateway
|
||||
try:
|
||||
body = client.create_subnet(
|
||||
network_id=network['id'],
|
||||
cidr=str(subnet_cidr),
|
||||
ip_version=ip_version,
|
||||
gateway_ip=gateway_ip,
|
||||
**kwargs)
|
||||
break
|
||||
except lib_exc.BadRequest as e:
|
||||
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
|
||||
if not is_overlapping_cidr:
|
||||
raise
|
||||
else:
|
||||
message = 'Available CIDR for subnet creation could not be found'
|
||||
raise exceptions.BuildErrorException(message)
|
||||
subnet = body['subnet']
|
||||
cls.subnets.append(subnet)
|
||||
return subnet
|
||||
|
||||
@classmethod
|
||||
def create_port(cls, network, **kwargs):
|
||||
"""Wrapper utility that returns a test port."""
|
||||
body = cls.client.create_port(network_id=network['id'],
|
||||
**kwargs)
|
||||
port = body['port']
|
||||
cls.ports.append(port)
|
||||
return port
|
||||
|
||||
@classmethod
|
||||
def update_port(cls, port, **kwargs):
|
||||
"""Wrapper utility that updates a test port."""
|
||||
body = cls.client.update_port(port['id'],
|
||||
**kwargs)
|
||||
return body['port']
|
||||
|
||||
@classmethod
|
||||
def create_router(cls, router_name=None, admin_state_up=False,
|
||||
external_network_id=None, enable_snat=None,
|
||||
**kwargs):
|
||||
ext_gw_info = {}
|
||||
if external_network_id:
|
||||
ext_gw_info['network_id'] = external_network_id
|
||||
if enable_snat:
|
||||
ext_gw_info['enable_snat'] = enable_snat
|
||||
body = cls.client.create_router(
|
||||
router_name, external_gateway_info=ext_gw_info,
|
||||
admin_state_up=admin_state_up, **kwargs)
|
||||
router = body['router']
|
||||
cls.routers.append(router)
|
||||
return router
|
||||
|
||||
@classmethod
|
||||
def create_floatingip(cls, external_network_id):
|
||||
"""Wrapper utility that returns a test floating IP."""
|
||||
body = cls.client.create_floatingip(
|
||||
floating_network_id=external_network_id)
|
||||
fip = body['floatingip']
|
||||
cls.floating_ips.append(fip)
|
||||
return fip
|
||||
|
||||
@classmethod
|
||||
def create_pool(cls, name, lb_method, protocol, subnet):
|
||||
"""Wrapper utility that returns a test pool."""
|
||||
body = cls.client.create_pool(
|
||||
name=name,
|
||||
lb_method=lb_method,
|
||||
protocol=protocol,
|
||||
subnet_id=subnet['id'])
|
||||
pool = body['pool']
|
||||
cls.pools.append(pool)
|
||||
return pool
|
||||
|
||||
@classmethod
|
||||
def update_pool(cls, name):
|
||||
"""Wrapper utility that returns a test pool."""
|
||||
body = cls.client.update_pool(name=name)
|
||||
pool = body['pool']
|
||||
return pool
|
||||
|
||||
@classmethod
|
||||
def create_vip(cls, name, protocol, protocol_port, subnet, pool):
|
||||
"""Wrapper utility that returns a test vip."""
|
||||
body = cls.client.create_vip(name=name,
|
||||
protocol=protocol,
|
||||
protocol_port=protocol_port,
|
||||
subnet_id=subnet['id'],
|
||||
pool_id=pool['id'])
|
||||
vip = body['vip']
|
||||
cls.vips.append(vip)
|
||||
return vip
|
||||
|
||||
@classmethod
|
||||
def update_vip(cls, name):
|
||||
body = cls.client.update_vip(name=name)
|
||||
vip = body['vip']
|
||||
return vip
|
||||
|
||||
@classmethod
|
||||
def create_member(cls, protocol_port, pool, ip_version=None):
|
||||
"""Wrapper utility that returns a test member."""
|
||||
ip_version = ip_version if ip_version is not None else cls._ip_version
|
||||
member_address = "fd00::abcd" if ip_version == 6 else "10.0.9.46"
|
||||
body = cls.client.create_member(address=member_address,
|
||||
protocol_port=protocol_port,
|
||||
pool_id=pool['id'])
|
||||
member = body['member']
|
||||
cls.members.append(member)
|
||||
return member
|
||||
|
||||
@classmethod
|
||||
def update_member(cls, admin_state_up):
|
||||
body = cls.client.update_member(admin_state_up=admin_state_up)
|
||||
member = body['member']
|
||||
return member
|
||||
|
||||
@classmethod
|
||||
def create_health_monitor(cls, delay, max_retries, Type, timeout):
|
||||
"""Wrapper utility that returns a test health monitor."""
|
||||
body = cls.client.create_health_monitor(delay=delay,
|
||||
max_retries=max_retries,
|
||||
type=Type,
|
||||
timeout=timeout)
|
||||
health_monitor = body['health_monitor']
|
||||
cls.health_monitors.append(health_monitor)
|
||||
return health_monitor
|
||||
|
||||
@classmethod
|
||||
def update_health_monitor(cls, admin_state_up):
|
||||
body = cls.client.update_vip(admin_state_up=admin_state_up)
|
||||
health_monitor = body['health_monitor']
|
||||
return health_monitor
|
||||
|
||||
@classmethod
|
||||
def create_router_interface(cls, router_id, subnet_id):
|
||||
"""Wrapper utility that returns a router interface."""
|
||||
interface = cls.client.add_router_interface_with_subnet_id(
|
||||
router_id, subnet_id)
|
||||
return interface
|
||||
|
||||
@classmethod
|
||||
def create_vpnservice(cls, subnet_id, router_id):
|
||||
"""Wrapper utility that returns a test vpn service."""
|
||||
body = cls.client.create_vpnservice(
|
||||
subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
|
||||
name=data_utils.rand_name("vpnservice-"))
|
||||
vpnservice = body['vpnservice']
|
||||
cls.vpnservices.append(vpnservice)
|
||||
return vpnservice
|
||||
|
||||
@classmethod
|
||||
def create_ikepolicy(cls, name):
|
||||
"""Wrapper utility that returns a test ike policy."""
|
||||
body = cls.client.create_ikepolicy(name=name)
|
||||
ikepolicy = body['ikepolicy']
|
||||
cls.ikepolicies.append(ikepolicy)
|
||||
return ikepolicy
|
||||
|
||||
@classmethod
|
||||
def create_firewall_rule(cls, action, protocol):
|
||||
"""Wrapper utility that returns a test firewall rule."""
|
||||
body = cls.client.create_firewall_rule(
|
||||
name=data_utils.rand_name("fw-rule"),
|
||||
action=action,
|
||||
protocol=protocol)
|
||||
fw_rule = body['firewall_rule']
|
||||
cls.fw_rules.append(fw_rule)
|
||||
return fw_rule
|
||||
|
||||
@classmethod
|
||||
def create_firewall_policy(cls):
|
||||
"""Wrapper utility that returns a test firewall policy."""
|
||||
body = cls.client.create_firewall_policy(
|
||||
name=data_utils.rand_name("fw-policy"))
|
||||
fw_policy = body['firewall_policy']
|
||||
cls.fw_policies.append(fw_policy)
|
||||
return fw_policy
|
||||
|
||||
@classmethod
|
||||
def delete_router(cls, router):
|
||||
body = cls.client.list_router_interfaces(router['id'])
|
||||
interfaces = body['ports']
|
||||
for i in interfaces:
|
||||
try:
|
||||
cls.client.remove_router_interface_with_subnet_id(
|
||||
router['id'], i['fixed_ips'][0]['subnet_id'])
|
||||
except lib_exc.NotFound:
|
||||
pass
|
||||
cls.client.delete_router(router['id'])
|
||||
|
||||
@classmethod
|
||||
def create_ipsecpolicy(cls, name):
|
||||
"""Wrapper utility that returns a test ipsec policy."""
|
||||
body = cls.client.create_ipsecpolicy(name=name)
|
||||
ipsecpolicy = body['ipsecpolicy']
|
||||
cls.ipsecpolicies.append(ipsecpolicy)
|
||||
return ipsecpolicy
|
||||
|
||||
|
||||
class BaseAdminNetworkTest(BaseNetworkTest):
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(BaseAdminNetworkTest, cls).resource_setup()
|
||||
|
||||
try:
|
||||
creds = cls.isolated_creds.get_admin_creds()
|
||||
cls.os_adm = clients.Manager(credentials=creds)
|
||||
except NotImplementedError:
|
||||
msg = ("Missing Administrative Network API credentials "
|
||||
"in configuration.")
|
||||
raise cls.skipException(msg)
|
||||
cls.admin_client = cls.os_adm.network_client
|
||||
|
||||
@classmethod
|
||||
def create_metering_label(cls, name, description):
|
||||
"""Wrapper utility that returns a test metering label."""
|
||||
body = cls.admin_client.create_metering_label(
|
||||
description=description,
|
||||
name=data_utils.rand_name("metering-label"))
|
||||
metering_label = body['metering_label']
|
||||
cls.metering_labels.append(metering_label)
|
||||
return metering_label
|
||||
|
||||
@classmethod
|
||||
def create_metering_label_rule(cls, remote_ip_prefix, direction,
|
||||
metering_label_id):
|
||||
"""Wrapper utility that returns a test metering label rule."""
|
||||
body = cls.admin_client.create_metering_label_rule(
|
||||
remote_ip_prefix=remote_ip_prefix, direction=direction,
|
||||
metering_label_id=metering_label_id)
|
||||
metering_label_rule = body['metering_label_rule']
|
||||
cls.metering_label_rules.append(metering_label_rule)
|
||||
return metering_label_rule
|
|
@ -0,0 +1,120 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import cred_provider
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import manager
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v2.json.identity_client import \
|
||||
IdentityClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v2.json.token_client import \
|
||||
TokenClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.credentials_client \
|
||||
import CredentialsClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.endpoints_client import \
|
||||
EndPointClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.identity_client import \
|
||||
IdentityV3ClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.policy_client import \
|
||||
PolicyClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.region_client import \
|
||||
RegionClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.service_client import \
|
||||
ServiceClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.token_client import \
|
||||
V3TokenClientJSON
|
||||
from neutron_lbaas.tests.tempest.lib.services.network.json.network_client import \
|
||||
NetworkClientJSON
|
||||
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Manager(manager.Manager):
|
||||
|
||||
"""
|
||||
Top level manager for OpenStack tempest clients
|
||||
"""
|
||||
|
||||
default_params = {
|
||||
'disable_ssl_certificate_validation':
|
||||
CONF.identity.disable_ssl_certificate_validation,
|
||||
'ca_certs': CONF.identity.ca_certificates_file,
|
||||
'trace_requests': CONF.debug.trace_requests
|
||||
}
|
||||
|
||||
# NOTE: Tempest uses timeout values of compute API if project specific
|
||||
# timeout values don't exist.
|
||||
default_params_with_timeout_values = {
|
||||
'build_interval': CONF.compute.build_interval,
|
||||
'build_timeout': CONF.compute.build_timeout
|
||||
}
|
||||
default_params_with_timeout_values.update(default_params)
|
||||
|
||||
def __init__(self, credentials=None, service=None):
|
||||
super(Manager, self).__init__(credentials=credentials)
|
||||
|
||||
self._set_identity_clients()
|
||||
|
||||
self.network_client = NetworkClientJSON(
|
||||
self.auth_provider,
|
||||
CONF.network.catalog_type,
|
||||
CONF.network.region or CONF.identity.region,
|
||||
endpoint_type=CONF.network.endpoint_type,
|
||||
build_interval=CONF.network.build_interval,
|
||||
build_timeout=CONF.network.build_timeout,
|
||||
**self.default_params)
|
||||
|
||||
def _set_identity_clients(self):
|
||||
params = {
|
||||
'service': CONF.identity.catalog_type,
|
||||
'region': CONF.identity.region,
|
||||
'endpoint_type': 'adminURL'
|
||||
}
|
||||
params.update(self.default_params_with_timeout_values)
|
||||
|
||||
self.identity_client = IdentityClientJSON(self.auth_provider,
|
||||
**params)
|
||||
self.identity_v3_client = IdentityV3ClientJSON(self.auth_provider,
|
||||
**params)
|
||||
self.endpoints_client = EndPointClientJSON(self.auth_provider,
|
||||
**params)
|
||||
self.service_client = ServiceClientJSON(self.auth_provider, **params)
|
||||
self.policy_client = PolicyClientJSON(self.auth_provider, **params)
|
||||
self.region_client = RegionClientJSON(self.auth_provider, **params)
|
||||
self.credentials_client = CredentialsClientJSON(self.auth_provider,
|
||||
**params)
|
||||
# Token clients do not use the catalog. They only need default_params.
|
||||
self.token_client = TokenClientJSON(CONF.identity.uri,
|
||||
**self.default_params)
|
||||
if CONF.identity_feature_enabled.api_v3:
|
||||
self.token_v3_client = V3TokenClientJSON(CONF.identity.uri_v3,
|
||||
**self.default_params)
|
||||
|
||||
|
||||
class AdminManager(Manager):
|
||||
|
||||
"""
|
||||
Manager object that uses the admin credentials for its
|
||||
managed client objects
|
||||
"""
|
||||
|
||||
def __init__(self, service=None):
|
||||
super(AdminManager, self).__init__(
|
||||
credentials=cred_provider.get_configured_credentials(
|
||||
'identity_admin'),
|
||||
service=service)
|
|
@ -0,0 +1,453 @@
|
|||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib.common.utils import data_utils
|
||||
from tempest_lib import decorators
|
||||
|
||||
from neutron_lbaas.tests.tempest.v1.api import base
|
||||
from neutron_lbaas.tests.tempest.lib import test
|
||||
|
||||
|
||||
class LoadBalancerTestJSON(base.BaseNetworkTest):
|
||||
|
||||
"""
|
||||
Tests the following operations in the Neutron API using the REST client for
|
||||
Neutron:
|
||||
|
||||
create vIP, and Pool
|
||||
show vIP
|
||||
list vIP
|
||||
update vIP
|
||||
delete vIP
|
||||
update pool
|
||||
delete pool
|
||||
show pool
|
||||
list pool
|
||||
health monitoring operations
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(LoadBalancerTestJSON, cls).resource_setup()
|
||||
if not test.is_extension_enabled('lbaas', 'network'):
|
||||
msg = "lbaas extension not enabled."
|
||||
raise cls.skipException(msg)
|
||||
cls.network = cls.create_network()
|
||||
cls.name = cls.network['name']
|
||||
cls.subnet = cls.create_subnet(cls.network)
|
||||
pool_name = data_utils.rand_name('pool-')
|
||||
vip_name = data_utils.rand_name('vip-')
|
||||
cls.pool = cls.create_pool(pool_name, "ROUND_ROBIN",
|
||||
"HTTP", cls.subnet)
|
||||
cls.vip = cls.create_vip(name=vip_name,
|
||||
protocol="HTTP",
|
||||
protocol_port=80,
|
||||
subnet=cls.subnet,
|
||||
pool=cls.pool)
|
||||
cls.member = cls.create_member(80, cls.pool, cls._ip_version)
|
||||
cls.member_address = ("10.0.9.47" if cls._ip_version == 4
|
||||
else "2015::beef")
|
||||
cls.health_monitor = cls.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
Type="TCP",
|
||||
timeout=1)
|
||||
|
||||
def _check_list_with_filter(self, obj_name, attr_exceptions, **kwargs):
|
||||
create_obj = getattr(self.client, 'create_' + obj_name)
|
||||
delete_obj = getattr(self.client, 'delete_' + obj_name)
|
||||
list_objs = getattr(self.client, 'list_' + obj_name + 's')
|
||||
|
||||
body = create_obj(**kwargs)
|
||||
obj = body[obj_name]
|
||||
self.addCleanup(delete_obj, obj['id'])
|
||||
for key, value in obj.iteritems():
|
||||
# It is not relevant to filter by all arguments. That is why
|
||||
# there is a list of attr to except
|
||||
if key not in attr_exceptions:
|
||||
body = list_objs(**{key: value})
|
||||
objs = [v[key] for v in body[obj_name + 's']]
|
||||
self.assertIn(value, objs)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('c96dbfab-4a80-4e74-a535-e950b5bedd47')
|
||||
def test_list_vips(self):
|
||||
# Verify the vIP exists in the list of all vIPs
|
||||
body = self.client.list_vips()
|
||||
vips = body['vips']
|
||||
self.assertIn(self.vip['id'], [v['id'] for v in vips])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('b8853f65-5089-4e69-befd-041a143427ff')
|
||||
def test_list_vips_with_filter(self):
|
||||
name = data_utils.rand_name('vip-')
|
||||
body = self.client.create_pool(name=data_utils.rand_name("pool-"),
|
||||
lb_method="ROUND_ROBIN",
|
||||
protocol="HTTPS",
|
||||
subnet_id=self.subnet['id'])
|
||||
pool = body['pool']
|
||||
self.addCleanup(self.client.delete_pool, pool['id'])
|
||||
attr_exceptions = ['status', 'session_persistence',
|
||||
'status_description']
|
||||
self._check_list_with_filter(
|
||||
'vip', attr_exceptions, name=name, protocol="HTTPS",
|
||||
protocol_port=81, subnet_id=self.subnet['id'], pool_id=pool['id'],
|
||||
description=data_utils.rand_name('description-'),
|
||||
admin_state_up=False)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('27f56083-9af9-4a48-abe9-ca1bcc6c9035')
|
||||
def test_create_update_delete_pool_vip(self):
|
||||
# Creates a vip
|
||||
name = data_utils.rand_name('vip-')
|
||||
address = self.subnet['allocation_pools'][0]['end']
|
||||
body = self.client.create_pool(
|
||||
name=data_utils.rand_name("pool-"),
|
||||
lb_method='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
subnet_id=self.subnet['id'])
|
||||
pool = body['pool']
|
||||
body = self.client.create_vip(name=name,
|
||||
protocol="HTTP",
|
||||
protocol_port=80,
|
||||
subnet_id=self.subnet['id'],
|
||||
pool_id=pool['id'],
|
||||
address=address)
|
||||
vip = body['vip']
|
||||
vip_id = vip['id']
|
||||
# Confirm VIP's address correctness with a show
|
||||
body = self.client.show_vip(vip_id)
|
||||
vip = body['vip']
|
||||
self.assertEqual(address, vip['address'])
|
||||
# Verification of vip update
|
||||
new_name = "New_vip"
|
||||
new_description = "New description"
|
||||
persistence_type = "HTTP_COOKIE"
|
||||
update_data = {"session_persistence": {
|
||||
"type": persistence_type}}
|
||||
body = self.client.update_vip(vip_id,
|
||||
name=new_name,
|
||||
description=new_description,
|
||||
connection_limit=10,
|
||||
admin_state_up=False,
|
||||
**update_data)
|
||||
updated_vip = body['vip']
|
||||
self.assertEqual(new_name, updated_vip['name'])
|
||||
self.assertEqual(new_description, updated_vip['description'])
|
||||
self.assertEqual(10, updated_vip['connection_limit'])
|
||||
self.assertFalse(updated_vip['admin_state_up'])
|
||||
self.assertEqual(persistence_type,
|
||||
updated_vip['session_persistence']['type'])
|
||||
self.client.delete_vip(vip['id'])
|
||||
self.client.wait_for_resource_deletion('vip', vip['id'])
|
||||
# Verification of pool update
|
||||
new_name = "New_pool"
|
||||
body = self.client.update_pool(pool['id'],
|
||||
name=new_name,
|
||||
description="new_description",
|
||||
lb_method='LEAST_CONNECTIONS')
|
||||
updated_pool = body['pool']
|
||||
self.assertEqual(new_name, updated_pool['name'])
|
||||
self.assertEqual('new_description', updated_pool['description'])
|
||||
self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
|
||||
self.client.delete_pool(pool['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('0435a95e-1d19-4d90-9e9f-3b979e9ad089')
|
||||
def test_show_vip(self):
|
||||
# Verifies the details of a vip
|
||||
body = self.client.show_vip(self.vip['id'])
|
||||
vip = body['vip']
|
||||
for key, value in vip.iteritems():
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(self.vip[key], value)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('6e7a7d31-8451-456d-b24a-e50479ce42a7')
|
||||
def test_show_pool(self):
|
||||
# Here we need to new pool without any dependence with vips
|
||||
body = self.client.create_pool(name=data_utils.rand_name("pool-"),
|
||||
lb_method='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
subnet_id=self.subnet['id'])
|
||||
pool = body['pool']
|
||||
self.addCleanup(self.client.delete_pool, pool['id'])
|
||||
# Verifies the details of a pool
|
||||
body = self.client.show_pool(pool['id'])
|
||||
shown_pool = body['pool']
|
||||
for key, value in pool.iteritems():
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(value, shown_pool[key])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('d1ab1ffa-e06a-487f-911f-56418cb27727')
|
||||
def test_list_pools(self):
|
||||
# Verify the pool exists in the list of all pools
|
||||
body = self.client.list_pools()
|
||||
pools = body['pools']
|
||||
self.assertIn(self.pool['id'], [p['id'] for p in pools])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('27cc4c1a-caac-4273-b983-2acb4afaad4f')
|
||||
def test_list_pools_with_filters(self):
|
||||
attr_exceptions = ['status', 'vip_id', 'members', 'provider',
|
||||
'status_description']
|
||||
self._check_list_with_filter(
|
||||
'pool', attr_exceptions, name=data_utils.rand_name("pool-"),
|
||||
lb_method="ROUND_ROBIN", protocol="HTTPS",
|
||||
subnet_id=self.subnet['id'],
|
||||
description=data_utils.rand_name('description-'),
|
||||
admin_state_up=False)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('282d0dfd-5c3a-4c9b-b39c-c99782f39193')
|
||||
def test_list_members(self):
|
||||
# Verify the member exists in the list of all members
|
||||
body = self.client.list_members()
|
||||
members = body['members']
|
||||
self.assertIn(self.member['id'], [m['id'] for m in members])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('243b5126-24c6-4879-953e-7c7e32d8a57f')
|
||||
def test_list_members_with_filters(self):
|
||||
attr_exceptions = ['status', 'status_description']
|
||||
self._check_list_with_filter('member', attr_exceptions,
|
||||
address=self.member_address,
|
||||
protocol_port=80,
|
||||
pool_id=self.pool['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('fb833ee8-9e69-489f-b540-a409762b78b2')
|
||||
def test_create_update_delete_member(self):
|
||||
# Creates a member
|
||||
body = self.client.create_member(address=self.member_address,
|
||||
protocol_port=80,
|
||||
pool_id=self.pool['id'])
|
||||
member = body['member']
|
||||
# Verification of member update
|
||||
body = self.client.update_member(member['id'],
|
||||
admin_state_up=False)
|
||||
updated_member = body['member']
|
||||
self.assertFalse(updated_member['admin_state_up'])
|
||||
# Verification of member delete
|
||||
self.client.delete_member(member['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('893cd71f-a7dd-4485-b162-f6ab9a534914')
|
||||
def test_show_member(self):
|
||||
# Verifies the details of a member
|
||||
body = self.client.show_member(self.member['id'])
|
||||
member = body['member']
|
||||
for key, value in member.iteritems():
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(self.member[key], value)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('8e5822c5-68a4-4224-8d6c-a617741ebc2d')
|
||||
def test_list_health_monitors(self):
|
||||
# Verify the health monitor exists in the list of all health monitors
|
||||
body = self.client.list_health_monitors()
|
||||
health_monitors = body['health_monitors']
|
||||
self.assertIn(self.health_monitor['id'],
|
||||
[h['id'] for h in health_monitors])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('49bac58a-511c-4875-b794-366698211d25')
|
||||
def test_list_health_monitors_with_filters(self):
|
||||
attr_exceptions = ['status', 'status_description', 'pools']
|
||||
self._check_list_with_filter('health_monitor', attr_exceptions,
|
||||
delay=5, max_retries=4, type="TCP",
|
||||
timeout=2)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('e8ce05c4-d554-4d1e-a257-ad32ce134bb5')
|
||||
def test_create_update_delete_health_monitor(self):
|
||||
# Creates a health_monitor
|
||||
body = self.client.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
type="TCP",
|
||||
timeout=1)
|
||||
health_monitor = body['health_monitor']
|
||||
# Verification of health_monitor update
|
||||
body = (self.client.update_health_monitor
|
||||
(health_monitor['id'],
|
||||
admin_state_up=False))
|
||||
updated_health_monitor = body['health_monitor']
|
||||
self.assertFalse(updated_health_monitor['admin_state_up'])
|
||||
# Verification of health_monitor delete
|
||||
body = self.client.delete_health_monitor(health_monitor['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('d3e1aebc-06c2-49b3-9816-942af54012eb')
|
||||
def test_create_health_monitor_http_type(self):
|
||||
hm_type = "HTTP"
|
||||
body = self.client.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
type=hm_type,
|
||||
timeout=1)
|
||||
health_monitor = body['health_monitor']
|
||||
self.addCleanup(self.client.delete_health_monitor,
|
||||
health_monitor['id'])
|
||||
self.assertEqual(hm_type, health_monitor['type'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('0eff9f67-90fb-4bb1-b4ed-c5fda99fff0c')
|
||||
def test_update_health_monitor_http_method(self):
|
||||
body = self.client.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
type="HTTP",
|
||||
timeout=1)
|
||||
health_monitor = body['health_monitor']
|
||||
self.addCleanup(self.client.delete_health_monitor,
|
||||
health_monitor['id'])
|
||||
body = (self.client.update_health_monitor
|
||||
(health_monitor['id'],
|
||||
http_method="POST",
|
||||
url_path="/home/user",
|
||||
expected_codes="290"))
|
||||
updated_health_monitor = body['health_monitor']
|
||||
self.assertEqual("POST", updated_health_monitor['http_method'])
|
||||
self.assertEqual("/home/user", updated_health_monitor['url_path'])
|
||||
self.assertEqual("290", updated_health_monitor['expected_codes'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('08e126ab-1407-483f-a22e-b11cc032ca7c')
|
||||
def test_show_health_monitor(self):
|
||||
# Verifies the details of a health_monitor
|
||||
body = self.client.show_health_monitor(self.health_monitor['id'])
|
||||
health_monitor = body['health_monitor']
|
||||
for key, value in health_monitor.iteritems():
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(self.health_monitor[key], value)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('87f7628e-8918-493d-af50-0602845dbb5b')
|
||||
def test_associate_disassociate_health_monitor_with_pool(self):
|
||||
# Verify that a health monitor can be associated with a pool
|
||||
self.client.associate_health_monitor_with_pool(
|
||||
self.health_monitor['id'], self.pool['id'])
|
||||
body = self.client.show_health_monitor(
|
||||
self.health_monitor['id'])
|
||||
health_monitor = body['health_monitor']
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
self.assertIn(pool['id'],
|
||||
[p['pool_id'] for p in health_monitor['pools']])
|
||||
self.assertIn(health_monitor['id'], pool['health_monitors'])
|
||||
# Verify that a health monitor can be disassociated from a pool
|
||||
(self.client.disassociate_health_monitor_with_pool
|
||||
(self.health_monitor['id'], self.pool['id']))
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
body = self.client.show_health_monitor(
|
||||
self.health_monitor['id'])
|
||||
health_monitor = body['health_monitor']
|
||||
self.assertNotIn(health_monitor['id'], pool['health_monitors'])
|
||||
self.assertNotIn(pool['id'],
|
||||
[p['pool_id'] for p in health_monitor['pools']])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('525fc7dc-be24-408d-938d-822e9783e027')
|
||||
def test_get_lb_pool_stats(self):
|
||||
# Verify the details of pool stats
|
||||
body = self.client.list_lb_pool_stats(self.pool['id'])
|
||||
stats = body['stats']
|
||||
self.assertIn("bytes_in", stats)
|
||||
self.assertIn("total_connections", stats)
|
||||
self.assertIn("active_connections", stats)
|
||||
self.assertIn("bytes_out", stats)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('66236be2-5121-4047-8cde-db4b83b110a5')
|
||||
def test_update_list_of_health_monitors_associated_with_pool(self):
|
||||
(self.client.associate_health_monitor_with_pool
|
||||
(self.health_monitor['id'], self.pool['id']))
|
||||
self.client.update_health_monitor(
|
||||
self.health_monitor['id'], admin_state_up=False)
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
health_monitors = body['pool']['health_monitors']
|
||||
for health_monitor_id in health_monitors:
|
||||
body = self.client.show_health_monitor(health_monitor_id)
|
||||
self.assertFalse(body['health_monitor']['admin_state_up'])
|
||||
(self.client.disassociate_health_monitor_with_pool
|
||||
(self.health_monitor['id'], self.pool['id']))
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('44ec9b40-b501-41e2-951f-4fc673b15ac0')
|
||||
def test_update_admin_state_up_of_pool(self):
|
||||
self.client.update_pool(self.pool['id'],
|
||||
admin_state_up=False)
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
self.assertFalse(pool['admin_state_up'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('466a9d4c-37c6-4ea2-b807-133437beb48c')
|
||||
def test_show_vip_associated_with_pool(self):
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
body = self.client.show_vip(pool['vip_id'])
|
||||
vip = body['vip']
|
||||
self.assertEqual(self.vip['name'], vip['name'])
|
||||
self.assertEqual(self.vip['id'], vip['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('7b97694e-69d0-4151-b265-e1052a465aa8')
|
||||
def test_show_members_associated_with_pool(self):
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
members = body['pool']['members']
|
||||
for member_id in members:
|
||||
body = self.client.show_member(member_id)
|
||||
self.assertIsNotNone(body['member']['status'])
|
||||
self.assertEqual(member_id, body['member']['id'])
|
||||
self.assertIsNotNone(body['member']['admin_state_up'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('73ed6f27-595b-4b2c-969c-dbdda6b8ab34')
|
||||
def test_update_pool_related_to_member(self):
|
||||
# Create new pool
|
||||
body = self.client.create_pool(name=data_utils.rand_name("pool-"),
|
||||
lb_method='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
subnet_id=self.subnet['id'])
|
||||
new_pool = body['pool']
|
||||
self.addCleanup(self.client.delete_pool, new_pool['id'])
|
||||
# Update member with new pool's id
|
||||
body = self.client.update_member(self.member['id'],
|
||||
pool_id=new_pool['id'])
|
||||
# Confirm with show that pool_id change
|
||||
body = self.client.show_member(self.member['id'])
|
||||
member = body['member']
|
||||
self.assertEqual(member['pool_id'], new_pool['id'])
|
||||
# Update member with old pool id, this is needed for clean up
|
||||
body = self.client.update_member(self.member['id'],
|
||||
pool_id=self.pool['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('cf63f071-bbe3-40ba-97a0-a33e11923162')
|
||||
def test_update_member_weight(self):
|
||||
self.client.update_member(self.member['id'],
|
||||
weight=2)
|
||||
body = self.client.show_member(self.member['id'])
|
||||
member = body['member']
|
||||
self.assertEqual(2, member['weight'])
|
||||
|
||||
|
||||
@decorators.skip_because(bug="1402007")
|
||||
class LoadBalancerIpV6TestJSON(LoadBalancerTestJSON):
|
||||
_ip_version = 6
|
|
@ -14,19 +14,20 @@
|
|||
|
||||
import os
|
||||
import time
|
||||
|
||||
from neutron.i18n import _, _LI
|
||||
from oslo_log import log as logging
|
||||
from tempest_lib import exceptions
|
||||
|
||||
# from neutron_lbaas.tests.tempest.lib import clients as tempest_clients
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.v1.api import base
|
||||
from neutron_lbaas.tests.tempest.v2.clients import health_monitors_client
|
||||
from neutron_lbaas.tests.tempest.v2.clients import listeners_client
|
||||
from neutron_lbaas.tests.tempest.v2.clients import load_balancers_client
|
||||
from neutron_lbaas.tests.tempest.v2.clients import members_client
|
||||
from neutron_lbaas.tests.tempest.v2.clients import pools_client
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import clients as tempest_clients
|
||||
from tempest import config
|
||||
from tempest import exceptions
|
||||
from tempest.openstack.common import log as logging
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
# Use local tempest conf if one is available.
|
||||
|
@ -45,9 +46,11 @@ class BaseTestCase(base.BaseNetworkTest):
|
|||
def resource_setup(cls):
|
||||
super(BaseTestCase, cls).resource_setup()
|
||||
|
||||
credentials = cls.isolated_creds.get_primary_creds()
|
||||
mgr = tempest_clients.Manager(credentials=credentials)
|
||||
auth_provider = mgr.get_auth_provider(credentials)
|
||||
# credentials = cls.isolated_creds.get_primary_creds()
|
||||
# mgr = tempest_clients.Manager(credentials=credentials)
|
||||
mgr = cls.get_client_manager()
|
||||
# auth_provider = mgr.get_auth_provider(credentials)
|
||||
auth_provider = mgr.auth_provider
|
||||
client_args = [auth_provider, 'network', 'regionOne']
|
||||
|
||||
cls.load_balancers_client = (
|
||||
|
@ -331,9 +334,16 @@ class BaseAdminTestCase(BaseTestCase):
|
|||
def resource_setup(cls):
|
||||
|
||||
super(BaseAdminTestCase, cls).resource_setup()
|
||||
credentials_admin = cls.isolated_creds.get_admin_creds()
|
||||
mgr_admin = tempest_clients.Manager(credentials=credentials_admin)
|
||||
auth_provider_admin = mgr_admin.get_auth_provider(credentials_admin)
|
||||
|
||||
# credentials = cls.isolated_creds.get_primary_creds()
|
||||
# mgr = tempest_clients.Manager(credentials=credentials)
|
||||
mgr = cls.get_client_manager(credential_type='admin')
|
||||
# auth_provider = mgr.get_auth_provider(credentials)
|
||||
auth_provider_admin = mgr.auth_provider
|
||||
|
||||
# credentials_admin = cls.isolated_creds.get_admin_creds()
|
||||
# mgr_admin = tempest_clients.Manager(credentials=credentials_admin)
|
||||
# auth_provider_admin = mgr_admin.get_auth_provider(credentials_admin)
|
||||
client_args = [auth_provider_admin, 'network', 'regionOne']
|
||||
|
||||
cls.load_balancers_client = (
|
||||
|
|
|
@ -11,11 +11,12 @@
|
|||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from neutron_lbaas.tests.tempest.v2.api import base
|
||||
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest import exceptions as ex
|
||||
from tempest import test
|
||||
from tempest_lib.common.utils import data_utils
|
||||
from tempest_lib import exceptions as ex
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import test
|
||||
from neutron_lbaas.tests.tempest.v2.api import base
|
||||
|
||||
|
||||
class TestHealthMonitors(base.BaseTestCase):
|
||||
|
|
|
@ -13,13 +13,14 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron_lbaas.tests.tempest.v2.api import base
|
||||
from oslo_log import log as logging
|
||||
from tempest_lib.common.utils import data_utils
|
||||
from tempest_lib import decorators
|
||||
from tempest_lib import exceptions
|
||||
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest import config
|
||||
from tempest import exceptions
|
||||
from tempest.openstack.common import log as logging
|
||||
from tempest import test
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import test
|
||||
from neutron_lbaas.tests.tempest.v2.api import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
@ -218,7 +219,7 @@ class ListenersTestJSON(base.BaseTestCase):
|
|||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_create_listener_invalid_name(self):
|
||||
"""Test create listener with an invalid name"""
|
||||
|
@ -231,7 +232,7 @@ class ListenersTestJSON(base.BaseTestCase):
|
|||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_create_listener_invalid_description(self):
|
||||
"""Test create listener with an invalid description"""
|
||||
|
@ -403,7 +404,7 @@ class ListenersTestJSON(base.BaseTestCase):
|
|||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_update_listener_invalid_name(self):
|
||||
"""Test update a listener with an invalid name"""
|
||||
|
@ -414,7 +415,7 @@ class ListenersTestJSON(base.BaseTestCase):
|
|||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_update_listener_invalid_description(self):
|
||||
"""Test update a listener with an invalid description"""
|
||||
|
|
|
@ -12,11 +12,12 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
from tempest_lib.common.utils import data_utils
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import test
|
||||
from neutron_lbaas.tests.tempest.v2.api import base
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest import config
|
||||
from tempest.openstack.common import log as logging
|
||||
from tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
|
|
@ -15,12 +15,14 @@
|
|||
|
||||
from netaddr import IPAddress
|
||||
|
||||
from oslo_log import log as logging
|
||||
from tempest_lib.common.utils import data_utils
|
||||
from tempest_lib import decorators
|
||||
from tempest_lib import exceptions
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import test
|
||||
from neutron_lbaas.tests.tempest.v2.api import base
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest import config
|
||||
from tempest import exceptions
|
||||
from tempest.openstack.common import log as logging
|
||||
from tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
@ -224,7 +226,7 @@ class LoadBalancersTestJSON(base.BaseTestCase):
|
|||
wait=False,
|
||||
tenant_id="&^%123")
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_create_load_balancer_invalid_name(self):
|
||||
"""Test create load balancer with an invalid name"""
|
||||
|
@ -235,7 +237,7 @@ class LoadBalancersTestJSON(base.BaseTestCase):
|
|||
vip_subnet_id=self.subnet['id'],
|
||||
name='n' * 256)
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_create_load_balancer_invalid_description(self):
|
||||
"""Test create load balancer with an invalid description"""
|
||||
|
@ -302,7 +304,7 @@ class LoadBalancersTestJSON(base.BaseTestCase):
|
|||
self.load_balancer_id)
|
||||
self.assertEqual(load_balancer.get('name'), "")
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_update_load_balancer_invalid_name(self):
|
||||
"""Test update load balancer with invalid name"""
|
||||
|
@ -324,7 +326,7 @@ class LoadBalancersTestJSON(base.BaseTestCase):
|
|||
load_balancer_new = load_balancer['name']
|
||||
self.assertEqual(load_balancer_initial, load_balancer_new)
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_update_load_balancer_invalid_description(self):
|
||||
"""Test update load balancer with invalid description"""
|
||||
|
|
|
@ -12,13 +12,14 @@
|
|||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from neutron_lbaas.tests.tempest.v2.api import base
|
||||
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest import config
|
||||
from tempest import exceptions as ex
|
||||
from tempest.openstack.common import log as logging
|
||||
from tempest import test
|
||||
from oslo_log import log as logging
|
||||
from tempest_lib.common.utils import data_utils
|
||||
from tempest_lib import exceptions as ex
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import config
|
||||
from neutron_lbaas.tests.tempest.lib import test
|
||||
from neutron_lbaas.tests.tempest.v2.api import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
|
|
@ -12,9 +12,9 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest import test
|
||||
from tempest_lib.common.utils import data_utils
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import test
|
||||
from neutron_lbaas.tests.tempest.v2.api import base
|
||||
|
||||
|
||||
|
|
|
@ -12,13 +12,13 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.common.utils import data_utils
|
||||
from tempest import exceptions as ex
|
||||
from tempest import test
|
||||
from tempest_lib.common.utils import data_utils
|
||||
from tempest_lib import decorators
|
||||
from tempest_lib import exceptions as ex
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib import test
|
||||
from neutron_lbaas.tests.tempest.v2.api import base
|
||||
|
||||
|
||||
PROTOCOL_PORT = 80
|
||||
|
||||
|
||||
|
@ -350,7 +350,7 @@ class TestPools(base.BaseTestCase):
|
|||
tenant_id=tenant,
|
||||
lb_algorithm='ROUND_ROBIN')
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_create_pool_invalid_name_field(self):
|
||||
"""
|
||||
|
@ -362,7 +362,7 @@ class TestPools(base.BaseTestCase):
|
|||
lb_algorithm='ROUND_ROBIN',
|
||||
name='n' * 256)
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_create_pool_invalid_desc_field(self):
|
||||
"""
|
||||
|
@ -478,7 +478,7 @@ class TestPools(base.BaseTestCase):
|
|||
self.assertAlmostEqual(sess_pers, pool.get('session_persistence'))
|
||||
self._delete_pool(new_pool.get('id'))
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_update_pool_invalid_name(self):
|
||||
"""Test update pool with invalid name"""
|
||||
|
@ -487,7 +487,7 @@ class TestPools(base.BaseTestCase):
|
|||
new_pool.get('id'), name='n' * 256)
|
||||
self._delete_pool(new_pool.get('id'))
|
||||
|
||||
@test.skip_because(bug="1434717")
|
||||
@decorators.skip_because(bug="1434717")
|
||||
@test.attr(type='negative')
|
||||
def test_update_pool_invalid_desc(self):
|
||||
"""Test update pool with invalid desc"""
|
||||
|
|
|
@ -16,7 +16,7 @@ import urllib
|
|||
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from tempest.common import service_client
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class HealthMonitorsClientJSON(service_client.ServiceClient):
|
||||
|
|
|
@ -15,7 +15,8 @@
|
|||
import urllib
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from tempest.common import service_client
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class ListenersClientJSON(service_client.ServiceClient):
|
||||
|
|
|
@ -16,7 +16,7 @@ import urllib
|
|||
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from tempest.common import service_client
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class LoadBalancersClientJSON(service_client.ServiceClient):
|
||||
|
|
|
@ -16,7 +16,7 @@ import urllib
|
|||
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from tempest.common import service_client
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class MembersClientJSON(service_client.ServiceClient):
|
||||
|
|
|
@ -15,7 +15,8 @@
|
|||
import urllib
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from tempest.common import service_client
|
||||
|
||||
from neutron_lbaas.tests.tempest.lib.common import service_client
|
||||
|
||||
|
||||
class PoolsClientJSON(service_client.ServiceClient):
|
||||
|
|
|
@ -0,0 +1,78 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# This script is intended to allow repeatable migration of the neutron
|
||||
# api tests from tempest. The intention is to allow development to
|
||||
# continue in Tempest while the migration strategy evolves.
|
||||
|
||||
set -e
|
||||
|
||||
if [[ "$#" -ne 1 ]]; then
|
||||
>&2 echo "Usage: $0 /path/to/neutron
|
||||
Migrate lbaas's api tests from a neutron repo."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
NEUTRON_PATH=${NEUTRON_PATH:-$1}
|
||||
|
||||
if [ ! -d "$NEUTRON_PATH/neutron/tests/tempest" ]; then
|
||||
>&2 echo "Unable to find tempest at '$NEUTRON_PATH'. Please verify that the specified path points to a valid tempest repo."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
NEUTRON_LBAAS_PATH=${NEUTRON_LBAAS_PATH:-$(cd "$(dirname "$0")/.." && pwd)}
|
||||
NEUTRON_LBAAS_TEST_PATH=$NEUTRON_LBAAS_PATH/neutron_lbaas/tests
|
||||
|
||||
function copy_files {
|
||||
local neutron_dep_paths=(
|
||||
''
|
||||
'common'
|
||||
'common/generator'
|
||||
'common/utils'
|
||||
'services'
|
||||
'services/identity'
|
||||
'services/identity/v2'
|
||||
'services/identity/v2/json'
|
||||
'services/identity/v3'
|
||||
'services/identity/v3/json'
|
||||
'services/network'
|
||||
'services/network/json'
|
||||
)
|
||||
for neutron_dep_path in ${neutron_dep_paths[@]}; do
|
||||
local target_path=$NEUTRON_LBAAS_TEST_PATH/tempest/lib/$neutron_dep_path
|
||||
if [[ ! -d "$target_path" ]]; then
|
||||
mkdir -p "$target_path"
|
||||
fi
|
||||
cp $NEUTRON_PATH/neutron/tests/tempest/$neutron_dep_path/*.py "$target_path"
|
||||
done
|
||||
# local paths_to_remove=(
|
||||
# "$NEUTRON_LBAAS_TEST_PATH/tempest/clients.py"
|
||||
# )
|
||||
# for path_to_remove in ${paths_to_remove[@]}; do
|
||||
# if [ -f "$path_to_remove" ]; then
|
||||
# rm "$path_to_remove"
|
||||
# fi
|
||||
# done
|
||||
|
||||
# Tests are now maintained in neutron/tests/api
|
||||
cp $NEUTRON_PATH/neutron/tests/api/*.py $NEUTRON_LBAAS_TEST_PATH/tempest/v1/api
|
||||
cp $NEUTRON_PATH/neutron/tests/api/admin/*.py \
|
||||
$NEUTRON_LBAAS_TEST_PATH/tempest/v1/api/admin
|
||||
}
|
||||
|
||||
function rewrite_imports {
|
||||
regexes=(
|
||||
's/neutron.tests.tempest.common.generator/neutron_lbaas.tests.tempest.lib.common.generator/'
|
||||
"s/neutron.tests.api/neutron_lbaas.tests.tempest.v1.api/"
|
||||
's/neutron.tests.tempest.test/neutron_lbaas.tests.tempest.lib.test/'
|
||||
's/from neutron.tests.api import clients/from neutron_lbaas.tests.tempest.v1.api import clients/'
|
||||
's/from neutron.tests.tempest/from neutron_lbaas.tests.tempest.lib/'
|
||||
's/CONF.lock_path/CONF.oslo_concurrency.lock_path/'
|
||||
)
|
||||
files=$(find "$NEUTRON_LBAAS_TEST_PATH/tempest/lib" "$NEUTRON_LBAAS_TEST_PATH/tempest/v1/api" -name '*.py')
|
||||
for ((i = 0; i < ${#regexes[@]}; i++)); do
|
||||
perl -p -i -e "${regexes[$i]}" $files
|
||||
done
|
||||
}
|
||||
|
||||
copy_files
|
||||
rewrite_imports
|
22
tox.ini
22
tox.ini
|
@ -48,11 +48,9 @@ deps =
|
|||
{[testenv]deps}
|
||||
pylint
|
||||
commands =
|
||||
sh ./tools/check_bash.sh
|
||||
flake8
|
||||
pylint --rcfile=.pylintrc --output-format=colorized {posargs:neutron_lbaas}
|
||||
#neutron-db-manage check_migration
|
||||
#sh -c "find neutron_lbaas -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null"
|
||||
whitelist_externals = sh
|
||||
|
||||
[testenv:i18n]
|
||||
|
@ -81,7 +79,7 @@ commands = python setup.py build_sphinx
|
|||
ignore = E125,E126,E128,E129,E265,H305,H404,H405,N324
|
||||
show-source = true
|
||||
builtins = _
|
||||
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,.ropeproject,rally-scenarios
|
||||
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,.ropeproject,rally-scenarios,neutron_lbaas/tests/tempest/lib,neutron_lbaas/tests/tempest/v1/api
|
||||
|
||||
[hacking]
|
||||
import_exceptions = neutron.i18n
|
||||
|
@ -95,3 +93,21 @@ setenv =
|
|||
OS_TEST_PATH={toxinidir}/neutron_lbaas/tests/tempest
|
||||
OS_TESTR_CONCURRENCY=1
|
||||
TEMPEST_CONFIG_DIR={toxinidir}/neutron_lbaas/tests/tempest/etc
|
||||
|
||||
[testenv:apiv1]
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
sitepackages = True
|
||||
setenv =
|
||||
OS_TEST_PATH={toxinidir}/neutron_lbaas/tests/tempest/v1/api
|
||||
OS_TESTR_CONCURRENCY=1
|
||||
TEMPEST_CONFIG_DIR={toxinidir}/neutron_lbaas/tests/tempest/etc
|
||||
|
||||
[testenv:apiv2]
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
sitepackages = True
|
||||
setenv =
|
||||
OS_TEST_PATH={toxinidir}/neutron_lbaas/tests/tempest/v2/api
|
||||
OS_TESTR_CONCURRENCY=1
|
||||
TEMPEST_CONFIG_DIR={toxinidir}/neutron_lbaas/tests/tempest/etc
|
||||
|
|
Loading…
Reference in New Issue