diff --git a/neutron_lbaas/tests/tempest/etc/tempest.conf b/neutron_lbaas/tests/tempest/etc/tempest.conf deleted file mode 100644 index 08d048831..000000000 --- a/neutron_lbaas/tests/tempest/etc/tempest.conf +++ /dev/null @@ -1,47 +0,0 @@ -[DEFAULT] -# Leaving this as a placeholder -verbose=false -debug=false -use_stderr=false -lock_path = /opt/stack/data/tempest - -[identity] -# Replace these with values that represent your identity configuration -uri=http://localhost:5000/v2.0 -uri_v3=http://localhost:5000/v3 -auth_version=v2 -region=RegionOne - -admin_domain_name = Default -admin_tenant_id = 3c1f71f1a5c446d199809bd2f21d87ff -admin_tenant_name = admin -admin_password = password -admin_username = admin - -alt_tenant_name = alt_demo -alt_password = password -alt_username = alt_demo - -tenant_name = demo -password = password -username = demo - -[service_available] -tuskar = False -neutron = True -heat = False -ceilometer = False -swift = False -cinder = False -nova = True -glance = True -horizon = False - -[lbaas] -# These are not currently being pulled in. -# Need to implement CONF strategy for this. 
-catalog_type=network -region=RegionOne -endpoint_type=publicURL -build_interval=10 -build_timeout=300 diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/__init__.py b/neutron_lbaas/tests/tempest/lib/api_schema/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/request/__init__.py b/neutron_lbaas/tests/tempest/lib/api_schema/request/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/request/compute/__init__.py b/neutron_lbaas/tests/tempest/lib/api_schema/request/compute/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/request/compute/flavors.py b/neutron_lbaas/tests/tempest/lib/api_schema/request/compute/flavors.py new file mode 100644 index 000000000..adaaf270c --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/request/compute/flavors.py @@ -0,0 +1,58 @@ +# (c) 2014 Deutsche Telekom AG +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +common_flavor_details = { + "name": "get-flavor-details", + "http-method": "GET", + "url": "flavors/%s", + "resources": [ + {"name": "flavor", "expected_result": 404} + ] +} + +common_flavor_list = { + "name": "list-flavors-with-detail", + "http-method": "GET", + "url": "flavors/detail", + "json-schema": { + "type": "object", + "properties": { + } + } +} + +common_admin_flavor_create = { + "name": "flavor-create", + "http-method": "POST", + "admin_client": True, + "url": "flavors", + "default_result_code": 400, + "json-schema": { + "type": "object", + "properties": { + "flavor": { + "type": "object", + "properties": { + "name": {"type": "string", + "exclude_tests": ["gen_str_min_length"]}, + "ram": {"type": "integer", "minimum": 1}, + "vcpus": {"type": "integer", "minimum": 1}, + "disk": {"type": "integer"}, + "id": {"type": "integer", + "exclude_tests": ["gen_none", "gen_string"] + }, + } + } + } + } +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/request/compute/v2/__init__.py b/neutron_lbaas/tests/tempest/lib/api_schema/request/compute/v2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/request/compute/v2/flavors.py b/neutron_lbaas/tests/tempest/lib/api_schema/request/compute/v2/flavors.py new file mode 100644 index 000000000..d9de8d1b1 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/request/compute/v2/flavors.py @@ -0,0 +1,39 @@ +# (c) 2014 Deutsche Telekom AG +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from neutron_lbaas.tests.tempest.lib.api_schema.request.compute import flavors + +flavors_details = copy.deepcopy(flavors.common_flavor_details) + +flavor_list = copy.deepcopy(flavors.common_flavor_list) + +flavor_create = copy.deepcopy(flavors.common_admin_flavor_create) + +flavor_list["json-schema"]["properties"] = { + "minRam": { + "type": "integer", + "results": { + "gen_none": 400, + "gen_string": 400 + } + }, + "minDisk": { + "type": "integer", + "results": { + "gen_none": 400, + "gen_string": 400 + } + } +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/__init__.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/__init__.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/__init__.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/flavors.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/flavors.py new file mode 100644 index 000000000..d419f36de --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/flavors.py @@ -0,0 +1,99 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 \ + import parameter_types + +list_flavors = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'flavors': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'links': parameter_types.links, + 'id': {'type': 'string'} + }, + 'required': ['name', 'links', 'id'] + } + }, + 'flavors_links': parameter_types.links + }, + # NOTE(gmann): flavors_links attribute is not necessary + # to be present always So it is not 'required'. + 'required': ['flavors'] + } +} + +common_flavor_info = { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'links': parameter_types.links, + 'ram': {'type': 'integer'}, + 'vcpus': {'type': 'integer'}, + # 'swap' attributes comes as integer value but if it is empty + # it comes as "". So defining type of as string and integer. + 'swap': {'type': ['integer', 'string']}, + 'disk': {'type': 'integer'}, + 'id': {'type': 'string'}, + 'OS-FLV-DISABLED:disabled': {'type': 'boolean'}, + 'os-flavor-access:is_public': {'type': 'boolean'}, + 'rxtx_factor': {'type': 'number'}, + 'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'} + }, + # 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and + # 'OS-FLV-EXT-DATA' are API extensions. So they are not 'required'. 
+ 'required': ['name', 'links', 'ram', 'vcpus', 'swap', 'disk', 'id'] +} + +list_flavors_details = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'flavors': { + 'type': 'array', + 'items': common_flavor_info + }, + # NOTE(gmann): flavors_links attribute is not necessary + # to be present always So it is not 'required'. + 'flavors_links': parameter_types.links + }, + 'required': ['flavors'] + } +} + +unset_flavor_extra_specs = { + 'status_code': [200] +} + +create_get_flavor_details = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'flavor': common_flavor_info + }, + 'required': ['flavor'] + } +} + +delete_flavor = { + 'status_code': [202] +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/flavors_access.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/flavors_access.py new file mode 100644 index 000000000..cd31b0a94 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/flavors_access.py @@ -0,0 +1,34 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +add_remove_list_flavor_access = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'flavor_access': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'flavor_id': {'type': 'string'}, + 'tenant_id': {'type': 'string'}, + }, + 'required': ['flavor_id', 'tenant_id'], + } + } + }, + 'required': ['flavor_access'] + } +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/flavors_extra_specs.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/flavors_extra_specs.py new file mode 100644 index 000000000..faa25d079 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/flavors_extra_specs.py @@ -0,0 +1,39 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set_get_flavor_extra_specs = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'extra_specs': { + 'type': 'object', + 'patternProperties': { + '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'} + } + } + }, + 'required': ['extra_specs'] + } +} + +set_get_flavor_extra_specs_key = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'patternProperties': { + '^[a-zA-Z0-9_\-\. 
:]+$': {'type': 'string'} + } + } +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/floating_ips.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/floating_ips.py new file mode 100644 index 000000000..ad1c53162 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/floating_ips.py @@ -0,0 +1,148 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +common_floating_ip_info = { + 'type': 'object', + 'properties': { + # NOTE: Now the type of 'id' is integer, but + # here allows 'string' also because we will be + # able to change it to 'uuid' in the future. 
+ 'id': {'type': ['integer', 'string']}, + 'pool': {'type': ['string', 'null']}, + 'instance_id': {'type': ['string', 'null']}, + 'ip': { + 'type': 'string', + 'format': 'ip-address' + }, + 'fixed_ip': { + 'type': ['string', 'null'], + 'format': 'ip-address' + } + }, + 'required': ['id', 'pool', 'instance_id', + 'ip', 'fixed_ip'], + +} +list_floating_ips = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'floating_ips': { + 'type': 'array', + 'items': common_floating_ip_info + }, + }, + 'required': ['floating_ips'], + } +} + +create_get_floating_ip = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'floating_ip': common_floating_ip_info + }, + 'required': ['floating_ip'], + } +} + +list_floating_ip_pools = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'floating_ip_pools': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'} + }, + 'required': ['name'], + } + } + }, + 'required': ['floating_ip_pools'], + } +} + +add_remove_floating_ip = { + 'status_code': [202] +} + +create_floating_ips_bulk = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'floating_ips_bulk_create': { + 'type': 'object', + 'properties': { + 'interface': {'type': ['string', 'null']}, + 'ip_range': {'type': 'string'}, + 'pool': {'type': ['string', 'null']}, + }, + 'required': ['interface', 'ip_range', 'pool'], + } + }, + 'required': ['floating_ips_bulk_create'], + } +} + +delete_floating_ips_bulk = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'floating_ips_bulk_delete': {'type': 'string'} + }, + 'required': ['floating_ips_bulk_delete'], + } +} + +list_floating_ips_bulk = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'floating_ip_info': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 
'address': { + 'type': 'string', + 'format': 'ip-address' + }, + 'instance_uuid': {'type': ['string', 'null']}, + 'interface': {'type': ['string', 'null']}, + 'pool': {'type': ['string', 'null']}, + 'project_id': {'type': ['string', 'null']}, + 'fixed_ip': { + 'type': ['string', 'null'], + 'format': 'ip-address' + } + }, + # NOTE: fixed_ip is introduced after JUNO release, + # So it is not defined as 'required'. + 'required': ['address', 'instance_uuid', 'interface', + 'pool', 'project_id'], + } + } + }, + 'required': ['floating_ip_info'], + } +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/images.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/images.py new file mode 100644 index 000000000..a621a02fc --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/images.py @@ -0,0 +1,147 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy + +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 \ + import parameter_types + +image_links = copy.deepcopy(parameter_types.links) +image_links['items']['properties'].update({'type': {'type': 'string'}}) + +common_image_schema = { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'status': {'type': 'string'}, + 'updated': {'type': 'string'}, + 'links': image_links, + 'name': {'type': 'string'}, + 'created': {'type': 'string'}, + 'minDisk': {'type': 'integer'}, + 'minRam': {'type': 'integer'}, + 'progress': {'type': 'integer'}, + 'metadata': {'type': 'object'}, + 'server': { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'links': parameter_types.links + }, + 'required': ['id', 'links'] + }, + 'OS-EXT-IMG-SIZE:size': {'type': 'integer'}, + 'OS-DCF:diskConfig': {'type': 'string'} + }, + # 'server' attributes only comes in response body if image is + # associated with any server. 'OS-EXT-IMG-SIZE:size' & 'OS-DCF:diskConfig' + # are API extension, So those are not defined as 'required'. + 'required': ['id', 'status', 'updated', 'links', 'name', + 'created', 'minDisk', 'minRam', 'progress', + 'metadata'] +} + +get_image = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'image': common_image_schema + }, + 'required': ['image'] + } +} + +list_images = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'images': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'links': image_links, + 'name': {'type': 'string'} + }, + 'required': ['id', 'links', 'name'] + } + }, + 'images_links': parameter_types.links + }, + # NOTE(gmann): images_links attribute is not necessary to be + # present always So it is not 'required'. 
+ 'required': ['images'] + } +} + +create_image = { + 'status_code': [202], + 'response_header': { + 'type': 'object', + 'properties': parameter_types.response_header + } +} +create_image['response_header']['properties'].update( + {'location': { + 'type': 'string', + 'format': 'uri'} + } +) +create_image['response_header']['required'] = ['location'] + +delete = { + 'status_code': [204] +} + +image_metadata = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'metadata': {'type': 'object'} + }, + 'required': ['metadata'] + } +} + +image_meta_item = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'meta': {'type': 'object'} + }, + 'required': ['meta'] + } +} + +list_images_details = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'images': { + 'type': 'array', + 'items': common_image_schema + }, + 'images_links': parameter_types.links + }, + # NOTE(gmann): images_links attribute is not necessary to be + # present always So it is not 'required'. + 'required': ['images'] + } +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/interfaces.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/interfaces.py new file mode 100644 index 000000000..8e26fb08b --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/interfaces.py @@ -0,0 +1,73 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 \ + import parameter_types + +interface_common_info = { + 'type': 'object', + 'properties': { + 'port_state': {'type': 'string'}, + 'fixed_ips': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'subnet_id': { + 'type': 'string', + 'format': 'uuid' + }, + 'ip_address': { + 'type': 'string', + 'format': 'ipv4' + } + }, + 'required': ['subnet_id', 'ip_address'] + } + }, + 'port_id': {'type': 'string', 'format': 'uuid'}, + 'net_id': {'type': 'string', 'format': 'uuid'}, + 'mac_addr': parameter_types.mac_address + }, + 'required': ['port_state', 'fixed_ips', 'port_id', 'net_id', 'mac_addr'] +} + +get_create_interfaces = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'interfaceAttachment': interface_common_info + }, + 'required': ['interfaceAttachment'] + } +} + +list_interfaces = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'interfaceAttachments': { + 'type': 'array', + 'items': interface_common_info + } + }, + 'required': ['interfaceAttachments'] + } +} + +delete_interface = { + 'status_code': [202] +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/keypairs.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/keypairs.py new file mode 100644 index 000000000..ceae6cf9e --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/keypairs.py @@ -0,0 +1,100 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +get_keypair = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'keypair': { + 'type': 'object', + 'properties': { + 'public_key': {'type': 'string'}, + 'name': {'type': 'string'}, + 'fingerprint': {'type': 'string'}, + 'user_id': {'type': 'string'}, + 'deleted': {'type': 'boolean'}, + 'created_at': {'type': 'string'}, + 'updated_at': {'type': ['string', 'null']}, + 'deleted_at': {'type': ['string', 'null']}, + 'id': {'type': 'integer'} + + }, + # When we run the get keypair API, response body includes + # all the above mentioned attributes. + # But in Nova API sample file, response body includes only + # 'public_key', 'name' & 'fingerprint'. So only 'public_key', + # 'name' & 'fingerprint' are defined as 'required'. 
+ 'required': ['public_key', 'name', 'fingerprint'] + } + }, + 'required': ['keypair'] + } +} + +create_keypair = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'keypair': { + 'type': 'object', + 'properties': { + 'fingerprint': {'type': 'string'}, + 'name': {'type': 'string'}, + 'public_key': {'type': 'string'}, + 'user_id': {'type': 'string'}, + 'private_key': {'type': 'string'} + }, + # When create keypair API is being called with 'Public key' + # (Importing keypair) then, response body does not contain + # 'private_key' So it is not defined as 'required' + 'required': ['fingerprint', 'name', 'public_key', 'user_id'] + } + }, + 'required': ['keypair'] + } +} + +delete_keypair = { + 'status_code': [202], +} + +list_keypairs = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'keypairs': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'keypair': { + 'type': 'object', + 'properties': { + 'public_key': {'type': 'string'}, + 'name': {'type': 'string'}, + 'fingerprint': {'type': 'string'} + }, + 'required': ['public_key', 'name', 'fingerprint'] + } + }, + 'required': ['keypair'] + } + } + }, + 'required': ['keypairs'] + } +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py new file mode 100644 index 000000000..7b4264c0e --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py @@ -0,0 +1,83 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +links = { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'href': { + 'type': 'string', + 'format': 'uri' + }, + 'rel': {'type': 'string'} + }, + 'additionalProperties': False, + 'required': ['href', 'rel'] + } +} + +mac_address = { + 'type': 'string', + 'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}' +} + +access_ip_v4 = { + 'type': 'string', + 'anyOf': [{'format': 'ipv4'}, {'enum': ['']}] +} + +access_ip_v6 = { + 'type': 'string', + 'anyOf': [{'format': 'ipv6'}, {'enum': ['']}] +} + +addresses = { + 'type': 'object', + 'patternProperties': { + # NOTE: Here is for 'private' or something. 
+ '^[a-zA-Z0-9-_.]+$': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'version': {'type': 'integer'}, + 'addr': { + 'type': 'string', + 'anyOf': [ + {'format': 'ipv4'}, + {'format': 'ipv6'} + ] + } + }, + 'additionalProperties': False, + 'required': ['version', 'addr'] + } + } + } +} + +response_header = { + 'connection': {'type': 'string'}, + 'content-length': {'type': 'string'}, + 'content-type': {'type': 'string'}, + 'status': {'type': 'string'}, + 'x-compute-request-id': {'type': 'string'}, + 'vary': {'type': 'string'}, + 'x-openstack-nova-api-version': {'type': 'string'}, + 'date': { + 'type': 'string', + 'format': 'data-time' + } +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/security_groups.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/security_groups.py new file mode 100644 index 000000000..9a852e52e --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/security_groups.py @@ -0,0 +1,105 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +common_security_group_rule = { + 'from_port': {'type': ['integer', 'null']}, + 'to_port': {'type': ['integer', 'null']}, + 'group': { + 'type': 'object', + 'properties': { + 'tenant_id': {'type': 'string'}, + 'name': {'type': 'string'} + } + }, + 'ip_protocol': {'type': ['string', 'null']}, + # 'parent_group_id' can be UUID so defining it as 'string' also. 
+ 'parent_group_id': {'type': ['string', 'integer', 'null']}, + 'ip_range': { + 'type': 'object', + 'properties': { + 'cidr': {'type': 'string'} + } + # When optional argument is provided in request body + # like 'group_id' then, attribute 'cidr' does not + # comes in response body. So it is not 'required'. + }, + 'id': {'type': ['string', 'integer']} +} + +common_security_group = { + 'type': 'object', + 'properties': { + 'id': {'type': ['integer', 'string']}, + 'name': {'type': 'string'}, + 'tenant_id': {'type': 'string'}, + 'rules': { + 'type': 'array', + 'items': { + 'type': ['object', 'null'], + 'properties': common_security_group_rule + } + }, + 'description': {'type': 'string'}, + }, + 'required': ['id', 'name', 'tenant_id', 'rules', 'description'], +} + +list_security_groups = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'security_groups': { + 'type': 'array', + 'items': common_security_group + } + }, + 'required': ['security_groups'] + } +} + +get_security_group = create_security_group = update_security_group = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'security_group': common_security_group + }, + 'required': ['security_group'] + } +} + +delete_security_group = { + 'status_code': [202] +} + +create_security_group_rule = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'security_group_rule': { + 'type': 'object', + 'properties': common_security_group_rule, + 'required': ['from_port', 'to_port', 'group', 'ip_protocol', + 'parent_group_id', 'id', 'ip_range'] + } + }, + 'required': ['security_group_rule'] + } +} + +delete_security_group_rule = { + 'status_code': [202] +} diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/servers.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/servers.py new file mode 100644 index 000000000..a97dfe747 --- /dev/null +++ 
b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/servers.py @@ -0,0 +1,519 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 \ + import parameter_types + +create_server = { + 'status_code': [202], + 'response_body': { + 'type': 'object', + 'properties': { + 'server': { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'security_groups': {'type': 'array'}, + 'links': parameter_types.links, + 'OS-DCF:diskConfig': {'type': 'string'} + }, + # NOTE: OS-DCF:diskConfig & security_groups are API extension, + # and some environments return a response without these + # attributes.So they are not 'required'. 
+ 'required': ['id', 'links'] + } + }, + 'required': ['server'] + } +} + +create_server_with_admin_pass = copy.deepcopy(create_server) +create_server_with_admin_pass['response_body']['properties']['server'][ + 'properties'].update({'adminPass': {'type': 'string'}}) +create_server_with_admin_pass['response_body']['properties']['server'][ + 'required'].append('adminPass') + +list_servers = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'servers': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'links': parameter_types.links, + 'name': {'type': 'string'} + }, + 'required': ['id', 'links', 'name'] + } + }, + 'servers_links': parameter_types.links + }, + # NOTE(gmann): servers_links attribute is not necessary to be + # present always So it is not 'required'. + 'required': ['servers'] + } +} + +delete_server = { + 'status_code': [204], +} + +common_show_server = { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'name': {'type': 'string'}, + 'status': {'type': 'string'}, + 'image': {'oneOf': [ + {'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'links': parameter_types.links + }, + 'required': ['id', 'links']}, + {'type': ['string', 'null']} + ]}, + 'flavor': { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'links': parameter_types.links + }, + 'required': ['id', 'links'] + }, + 'fault': { + 'type': 'object', + 'properties': { + 'code': {'type': 'integer'}, + 'created': {'type': 'string'}, + 'message': {'type': 'string'}, + 'details': {'type': 'string'}, + }, + # NOTE(gmann): 'details' is not necessary to be present + # in the 'fault'. So it is not defined as 'required'. 
+ 'required': ['code', 'created', 'message'] + }, + 'user_id': {'type': 'string'}, + 'tenant_id': {'type': 'string'}, + 'created': {'type': 'string'}, + 'updated': {'type': 'string'}, + 'progress': {'type': 'integer'}, + 'metadata': {'type': 'object'}, + 'links': parameter_types.links, + 'addresses': parameter_types.addresses, + 'hostId': {'type': 'string'}, + 'OS-DCF:diskConfig': {'type': 'string'}, + 'accessIPv4': parameter_types.access_ip_v4, + 'accessIPv6': parameter_types.access_ip_v6 + }, + # NOTE(GMann): 'progress' attribute is present in the response + # only when server's status is one of the progress statuses + # ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE") + # 'fault' attribute is present in the response + # only when server's status is one of the "ERROR", "DELETED". + # OS-DCF:diskConfig and accessIPv4/v6 are API + # extensions, and some environments return a response + # without these attributes.So these are not defined as 'required'. + 'required': ['id', 'name', 'status', 'image', 'flavor', + 'user_id', 'tenant_id', 'created', 'updated', + 'metadata', 'links', 'addresses', 'hostId'] +} + +update_server = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'server': common_show_server + }, + 'required': ['server'] + } +} + +server_detail = copy.deepcopy(common_show_server) +server_detail['properties'].update({ + 'key_name': {'type': ['string', 'null']}, + 'security_groups': {'type': 'array'}, + + # NOTE: Non-admin users also can see "OS-SRV-USG" and "OS-EXT-AZ" + # attributes. + 'OS-SRV-USG:launched_at': {'type': ['string', 'null']}, + 'OS-SRV-USG:terminated_at': {'type': ['string', 'null']}, + 'OS-EXT-AZ:availability_zone': {'type': 'string'}, + + # NOTE: Admin users only can see "OS-EXT-STS" and "OS-EXT-SRV-ATTR" + # attributes. 
+ 'OS-EXT-STS:task_state': {'type': ['string', 'null']}, + 'OS-EXT-STS:vm_state': {'type': 'string'}, + 'OS-EXT-STS:power_state': {'type': 'integer'}, + 'OS-EXT-SRV-ATTR:host': {'type': ['string', 'null']}, + 'OS-EXT-SRV-ATTR:instance_name': {'type': 'string'}, + 'OS-EXT-SRV-ATTR:hypervisor_hostname': {'type': ['string', 'null']}, + 'os-extended-volumes:volumes_attached': {'type': 'array'}, + 'config_drive': {'type': 'string'} +}) +server_detail['properties']['addresses']['patternProperties'][ + '^[a-zA-Z0-9-_.]+$']['items']['properties'].update({ + 'OS-EXT-IPS:type': {'type': 'string'}, + 'OS-EXT-IPS-MAC:mac_addr': parameter_types.mac_address}) +# NOTE(gmann): Update OS-EXT-IPS:type and OS-EXT-IPS-MAC:mac_addr +# attributes in server address. Those are API extension, +# and some environments return a response without +# these attributes. So they are not 'required'. + +get_server = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'server': server_detail + }, + 'required': ['server'] + } +} + +list_servers_detail = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'servers': { + 'type': 'array', + 'items': server_detail + }, + 'servers_links': parameter_types.links + }, + # NOTE(gmann): servers_links attribute is not necessary to be + # present always So it is not 'required'. 
+ 'required': ['servers'] + } +} + +rebuild_server = copy.deepcopy(update_server) +rebuild_server['status_code'] = [202] + +rebuild_server_with_admin_pass = copy.deepcopy(rebuild_server) +rebuild_server_with_admin_pass['response_body']['properties']['server'][ + 'properties'].update({'adminPass': {'type': 'string'}}) +rebuild_server_with_admin_pass['response_body']['properties']['server'][ + 'required'].append('adminPass') + +rescue_server = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'adminPass': {'type': 'string'} + }, + 'required': ['adminPass'] + } +} + +list_virtual_interfaces = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'virtual_interfaces': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'mac_address': parameter_types.mac_address, + 'OS-EXT-VIF-NET:net_id': {'type': 'string'} + }, + # 'OS-EXT-VIF-NET:net_id' is API extension So it is + # not defined as 'required' + 'required': ['id', 'mac_address'] + } + } + }, + 'required': ['virtual_interfaces'] + } +} + +common_attach_volume_info = { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'device': {'type': 'string'}, + 'volumeId': {'type': 'string'}, + 'serverId': {'type': ['integer', 'string']} + }, + 'required': ['id', 'device', 'volumeId', 'serverId'] +} + +attach_volume = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'volumeAttachment': common_attach_volume_info + }, + 'required': ['volumeAttachment'] + } +} + +detach_volume = { + 'status_code': [202] +} + +get_volume_attachment = copy.deepcopy(attach_volume) +get_volume_attachment['response_body']['properties'][ + 'volumeAttachment']['properties'].update({'serverId': {'type': 'string'}}) + +list_volume_attachments = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'volumeAttachments': { + 'type': 'array', + 'items': 
common_attach_volume_info + } + }, + 'required': ['volumeAttachments'] + } +} +list_volume_attachments['response_body']['properties'][ + 'volumeAttachments']['items']['properties'].update( + {'serverId': {'type': 'string'}}) + +list_addresses_by_network = { + 'status_code': [200], + 'response_body': parameter_types.addresses +} + +list_addresses = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'addresses': parameter_types.addresses + }, + 'required': ['addresses'] + } +} + +common_server_group = { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'name': {'type': 'string'}, + 'policies': { + 'type': 'array', + 'items': {'type': 'string'} + }, + # 'members' attribute contains the array of instance's UUID of + # instances present in server group + 'members': { + 'type': 'array', + 'items': {'type': 'string'} + }, + 'metadata': {'type': 'object'} + }, + 'required': ['id', 'name', 'policies', 'members', 'metadata'] +} + +create_get_server_group = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'server_group': common_server_group + }, + 'required': ['server_group'] + } +} + +delete_server_group = { + 'status_code': [204] +} + +list_server_groups = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'server_groups': { + 'type': 'array', + 'items': common_server_group + } + }, + 'required': ['server_groups'] + } +} + +instance_actions = { + 'type': 'object', + 'properties': { + 'action': {'type': 'string'}, + 'request_id': {'type': 'string'}, + 'user_id': {'type': 'string'}, + 'project_id': {'type': 'string'}, + 'start_time': {'type': 'string'}, + 'message': {'type': ['string', 'null']}, + 'instance_uuid': {'type': 'string'} + }, + 'required': ['action', 'request_id', 'user_id', 'project_id', + 'start_time', 'message', 'instance_uuid'] +} + +instance_action_events = { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 
'event': {'type': 'string'}, + 'start_time': {'type': 'string'}, + 'finish_time': {'type': 'string'}, + 'result': {'type': 'string'}, + 'traceback': {'type': ['string', 'null']} + }, + 'required': ['event', 'start_time', 'finish_time', 'result', + 'traceback'] + } +} + +list_instance_actions = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'instanceActions': { + 'type': 'array', + 'items': instance_actions + } + }, + 'required': ['instanceActions'] + } +} + +instance_actions_with_events = copy.deepcopy(instance_actions) +instance_actions_with_events['properties'].update({ + 'events': instance_action_events}) +# 'events' does not come in response body always so it is not +# defined as 'required' + +get_instance_action = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'instanceAction': instance_actions_with_events + }, + 'required': ['instanceAction'] + } +} + +get_password = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'password': {'type': 'string'} + }, + 'required': ['password'] + } +} + +get_vnc_console = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'console': { + 'type': 'object', + 'properties': { + 'type': {'type': 'string'}, + 'url': { + 'type': 'string', + 'format': 'uri' + } + }, + 'required': ['type', 'url'] + } + }, + 'required': ['console'] + } +} + +get_console_output = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'output': {'type': 'string'} + }, + 'required': ['output'] + } +} + +set_server_metadata = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'metadata': { + 'type': 'object', + 'patternProperties': { + '^.+$': {'type': 'string'} + } + } + }, + 'required': ['metadata'] + } +} + +list_server_metadata = copy.deepcopy(set_server_metadata) + +update_server_metadata = copy.deepcopy(set_server_metadata) + 
+delete_server_metadata_item = { + 'status_code': [204] +} + +set_get_server_metadata_item = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'meta': { + 'type': 'object', + 'patternProperties': { + '^.+$': {'type': 'string'} + } + } + }, + 'required': ['meta'] + } +} + +server_actions_common_schema = { + 'status_code': [202] +} + +server_actions_delete_password = { + 'status_code': [204] +} + +server_actions_confirm_resize = copy.deepcopy( + server_actions_delete_password) diff --git a/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/tenant_networks.py b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/tenant_networks.py new file mode 100644 index 000000000..0b2868a79 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/api_schema/response/compute/v2_1/tenant_networks.py @@ -0,0 +1,50 @@ +# Copyright 2015 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +param_network = { + 'type': 'object', + 'properties': { + 'id': {'type': 'string'}, + 'cidr': {'type': ['string', 'null']}, + 'label': {'type': 'string'} + }, + 'required': ['id', 'cidr', 'label'] +} + + +list_tenant_networks = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'networks': { + 'type': 'array', + 'items': param_network + } + }, + 'required': ['networks'] + } +} + + +get_tenant_network = { + 'status_code': [200], + 'response_body': { + 'type': 'object', + 'properties': { + 'network': param_network + }, + 'required': ['network'] + } +} diff --git a/neutron_lbaas/tests/tempest/lib/clients.py b/neutron_lbaas/tests/tempest/lib/clients.py index e97fa0da1..6ae4e8d88 100644 --- a/neutron_lbaas/tests/tempest/lib/clients.py +++ b/neutron_lbaas/tests/tempest/lib/clients.py @@ -20,7 +20,7 @@ from tempest_lib.services.identity.v2.token_client import TokenClientJSON from tempest_lib.services.identity.v3.token_client import V3TokenClientJSON from neutron_lbaas.tests.tempest.lib.common import cred_provider -from neutron_lbaas.tests.tempest.lib.common import negative_rest_client +# from neutron_lbaas.tests.tempest.lib.common import negative_rest_client from neutron_lbaas.tests.tempest.lib import config from neutron_lbaas.tests.tempest.lib import exceptions from neutron_lbaas.tests.tempest.lib import manager @@ -40,44 +40,44 @@ from neutron_lbaas.tests.tempest.lib import manager # from neutron_lbaas.tests.tempest.lib.services.compute.json.extensions_client import \ # ExtensionsClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.fixed_ips_client import FixedIPsClientJSON -# from neutron_lbaas.tests.tempest.lib.services.compute.json.flavors_client import FlavorsClientJSON -# from neutron_lbaas.tests.tempest.lib.services.compute.json.floating_ips_client import \ -# FloatingIPsClientJSON +from neutron_lbaas.tests.tempest.lib.services.compute.json.flavors_client import FlavorsClientJSON +from 
neutron_lbaas.tests.tempest.lib.services.compute.json.floating_ips_client import \ + FloatingIPsClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.hosts_client import HostsClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.hypervisor_client import \ # HypervisorClientJSON -# from neutron_lbaas.tests.tempest.lib.services.compute.json.images_client import ImagesClientJSON +from neutron_lbaas.tests.tempest.lib.services.compute.json.images_client import ImagesClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.instance_usage_audit_log_client import \ # InstanceUsagesAuditLogClientJSON -# from neutron_lbaas.tests.tempest.lib.services.compute.json.interfaces_client import \ -# InterfacesClientJSON -# from neutron_lbaas.tests.tempest.lib.services.compute.json.keypairs_client import KeyPairsClientJSON +from neutron_lbaas.tests.tempest.lib.services.compute.json.interfaces_client import \ + InterfacesClientJSON +from neutron_lbaas.tests.tempest.lib.services.compute.json.keypairs_client import KeyPairsClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.limits_client import LimitsClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.migrations_client import \ # MigrationsClientJSON -# from neutron_lbaas.tests.tempest.lib.services.compute.json.networks_client import NetworksClientJSON +from neutron_lbaas.tests.tempest.lib.services.compute.json.networks_client import NetworksClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.quotas_client import QuotaClassesClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.quotas_client import QuotasClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.security_group_default_rules_client import \ # SecurityGroupDefaultRulesClientJSON -# from neutron_lbaas.tests.tempest.lib.services.compute.json.security_groups_client import \ -# SecurityGroupsClientJSON -# from 
neutron_lbaas.tests.tempest.lib.services.compute.json.servers_client import ServersClientJSON +from neutron_lbaas.tests.tempest.lib.services.compute.json.security_groups_client import \ + SecurityGroupsClientJSON +from neutron_lbaas.tests.tempest.lib.services.compute.json.servers_client import ServersClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.services_client import ServicesClientJSON -# from neutron_lbaas.tests.tempest.lib.services.compute.json.tenant_networks_client import \ -# TenantNetworksClientJSON +from neutron_lbaas.tests.tempest.lib.services.compute.json.tenant_networks_client import \ + TenantNetworksClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.tenant_usages_client import \ # TenantUsagesClientJSON # from neutron_lbaas.tests.tempest.lib.services.compute.json.volumes_extensions_client import \ # VolumesExtensionsClientJSON # from neutron_lbaas.tests.tempest.lib.services.data_processing.v1_1.data_processing_client import \ # DataProcessingClient -# from neutron_lbaas.tests.tempest.lib.services.database.json.flavors_client import \ -# DatabaseFlavorsClientJSON -# from neutron_lbaas.tests.tempest.lib.services.database.json.limits_client import \ -# DatabaseLimitsClientJSON -# from neutron_lbaas.tests.tempest.lib.services.database.json.versions_client import \ -# DatabaseVersionsClientJSON +from neutron_lbaas.tests.tempest.lib.services.database.json.flavors_client import \ + DatabaseFlavorsClientJSON +from neutron_lbaas.tests.tempest.lib.services.database.json.limits_client import \ + DatabaseLimitsClientJSON +from neutron_lbaas.tests.tempest.lib.services.database.json.versions_client import \ + DatabaseVersionsClientJSON from neutron_lbaas.tests.tempest.lib.services.identity.v2.json.identity_client import \ IdentityClientJSON from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.credentials_client import \ @@ -90,16 +90,16 @@ from 
neutron_lbaas.tests.tempest.lib.services.identity.v3.json.policy_client imp from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.region_client import RegionClientJSON from neutron_lbaas.tests.tempest.lib.services.identity.v3.json.service_client import \ ServiceClientJSON -# from neutron_lbaas.tests.tempest.lib.services.image.v1.json.image_client import ImageClientJSON -# from neutron_lbaas.tests.tempest.lib.services.image.v2.json.image_client import ImageClientV2JSON +from neutron_lbaas.tests.tempest.lib.services.image.v1.json.image_client import ImageClientJSON +from neutron_lbaas.tests.tempest.lib.services.image.v2.json.image_client import ImageClientV2JSON # from neutron_lbaas.tests.tempest.lib.services.messaging.json.messaging_client import \ # MessagingClientJSON from neutron_lbaas.tests.tempest.lib.services.network.json.network_client import NetworkClientJSON # from neutron_lbaas.tests.tempest.lib.services.object_storage.account_client import AccountClient # from neutron_lbaas.tests.tempest.lib.services.object_storage.container_client import ContainerClient # from neutron_lbaas.tests.tempest.lib.services.object_storage.object_client import ObjectClient -# from neutron_lbaas.tests.tempest.lib.services.orchestration.json.orchestration_client import \ -# OrchestrationClient +from neutron_lbaas.tests.tempest.lib.services.orchestration.json.orchestration_client import \ + OrchestrationClient # from neutron_lbaas.tests.tempest.lib.services.telemetry.json.telemetry_client import \ # TelemetryClientJSON # from neutron_lbaas.tests.tempest.lib.services.volume.json.admin.volume_hosts_client import \ @@ -116,8 +116,8 @@ from neutron_lbaas.tests.tempest.lib.services.network.json.network_client import # from neutron_lbaas.tests.tempest.lib.services.volume.json.extensions_client import \ # ExtensionsClientJSON as VolumeExtensionClientJSON # from neutron_lbaas.tests.tempest.lib.services.volume.json.qos_client import QosSpecsClientJSON -# from 
neutron_lbaas.tests.tempest.lib.services.volume.json.snapshots_client import SnapshotsClientJSON -# from neutron_lbaas.tests.tempest.lib.services.volume.json.volumes_client import VolumesClientJSON +from neutron_lbaas.tests.tempest.lib.services.volume.json.snapshots_client import SnapshotsClientJSON +from neutron_lbaas.tests.tempest.lib.services.volume.json.volumes_client import VolumesClientJSON # from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.admin.volume_hosts_client import \ # VolumeHostsV2ClientJSON # from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.admin.volume_quotas_client import \ @@ -132,9 +132,9 @@ from neutron_lbaas.tests.tempest.lib.services.network.json.network_client import # from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.extensions_client import \ # ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON # from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON -# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.snapshots_client import \ -# SnapshotsV2ClientJSON -# from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.volumes_client import VolumesV2ClientJSON +from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.snapshots_client import \ + SnapshotsV2ClientJSON +from neutron_lbaas.tests.tempest.lib.services.volume.v2.json.volumes_client import VolumesV2ClientJSON CONF = config.CONF LOG = logging.getLogger(__name__) @@ -164,10 +164,10 @@ class Manager(manager.Manager): def __init__(self, credentials=None, service=None): super(Manager, self).__init__(credentials=credentials) - # self._set_compute_clients() - # self._set_database_clients() + self._set_compute_clients() + self._set_database_clients() self._set_identity_clients() - # self._set_volume_clients() + self._set_volume_clients() # self._set_object_storage_clients() # self.baremetal_client = BaremetalClientJSON( @@ -196,31 +196,31 @@ class Manager(manager.Manager): # CONF.identity.region, 
# endpoint_type=CONF.telemetry.endpoint_type, # **self.default_params_with_timeout_values) - # if CONF.service_available.glance: - # self.image_client = ImageClientJSON( - # self.auth_provider, - # CONF.image.catalog_type, - # CONF.image.region or CONF.identity.region, - # endpoint_type=CONF.image.endpoint_type, - # build_interval=CONF.image.build_interval, - # build_timeout=CONF.image.build_timeout, - # **self.default_params) - # self.image_client_v2 = ImageClientV2JSON( - # self.auth_provider, - # CONF.image.catalog_type, - # CONF.image.region or CONF.identity.region, - # endpoint_type=CONF.image.endpoint_type, - # build_interval=CONF.image.build_interval, - # build_timeout=CONF.image.build_timeout, - # **self.default_params) - # self.orchestration_client = OrchestrationClient( - # self.auth_provider, - # CONF.orchestration.catalog_type, - # CONF.orchestration.region or CONF.identity.region, - # endpoint_type=CONF.orchestration.endpoint_type, - # build_interval=CONF.orchestration.build_interval, - # build_timeout=CONF.orchestration.build_timeout, - # **self.default_params) + if CONF.service_available.glance: + self.image_client = ImageClientJSON( + self.auth_provider, + CONF.image.catalog_type, + CONF.image.region or CONF.identity.region, + endpoint_type=CONF.image.endpoint_type, + build_interval=CONF.image.build_interval, + build_timeout=CONF.image.build_timeout, + **self.default_params) + self.image_client_v2 = ImageClientV2JSON( + self.auth_provider, + CONF.image.catalog_type, + CONF.image.region or CONF.identity.region, + endpoint_type=CONF.image.endpoint_type, + build_interval=CONF.image.build_interval, + build_timeout=CONF.image.build_timeout, + **self.default_params) + self.orchestration_client = OrchestrationClient( + self.auth_provider, + CONF.orchestration.catalog_type, + CONF.orchestration.region or CONF.identity.region, + endpoint_type=CONF.orchestration.endpoint_type, + build_interval=CONF.orchestration.build_interval, + 
build_timeout=CONF.orchestration.build_timeout, + **self.default_params) # self.data_processing_client = DataProcessingClient( # self.auth_provider, # CONF.data_processing.catalog_type, @@ -239,44 +239,43 @@ class Manager(manager.Manager): # self.ec2api_client = botoclients.APIClientEC2(self.identity_client) # self.s3_client = botoclients.ObjectClientS3(self.identity_client) - # def _set_compute_clients(self): - # params = { - # 'service': CONF.compute.catalog_type, - # 'region': CONF.compute.region or CONF.identity.region, - # 'endpoint_type': CONF.compute.endpoint_type, - # 'build_interval': CONF.compute.build_interval, - # 'build_timeout': CONF.compute.build_timeout - # } - # params.update(self.default_params) + def _set_compute_clients(self): + params = { + 'service': CONF.compute.catalog_type, + 'region': CONF.compute.region or CONF.identity.region, + 'endpoint_type': CONF.compute.endpoint_type, + 'build_interval': CONF.compute.build_interval, + 'build_timeout': CONF.compute.build_timeout + } + params.update(self.default_params) # self.agents_client = AgentsClientJSON(self.auth_provider, **params) - # self.networks_client = NetworksClientJSON(self.auth_provider, **params) + self.networks_client = NetworksClientJSON(self.auth_provider, **params) # self.migrations_client = MigrationsClientJSON(self.auth_provider, # **params) # self.security_group_default_rules_client = ( # SecurityGroupDefaultRulesClientJSON(self.auth_provider, **params)) # self.certificates_client = CertificatesClientJSON(self.auth_provider, # **params) - # self.servers_client = ServersClientJSON( - # self.auth_provider, - # enable_instance_password=CONF.compute_feature_enabled - # .enable_instance_password, - # **params) + self.servers_client = ServersClientJSON( + self.auth_provider, + enable_instance_password=CONF.compute_feature_enabled + .enable_instance_password, **params) # self.limits_client = LimitsClientJSON(self.auth_provider, **params) - # self.images_client = 
ImagesClientJSON(self.auth_provider, **params) - # self.keypairs_client = KeyPairsClientJSON(self.auth_provider, **params) + self.images_client = ImagesClientJSON(self.auth_provider, **params) + self.keypairs_client = KeyPairsClientJSON(self.auth_provider, **params) # self.quotas_client = QuotasClientJSON(self.auth_provider, **params) # self.quota_classes_client = QuotaClassesClientJSON(self.auth_provider, # **params) - # self.flavors_client = FlavorsClientJSON(self.auth_provider, **params) + self.flavors_client = FlavorsClientJSON(self.auth_provider, **params) # self.extensions_client = ExtensionsClientJSON(self.auth_provider, # **params) - # self.floating_ips_client = FloatingIPsClientJSON(self.auth_provider, - # **params) - # self.security_groups_client = SecurityGroupsClientJSON( - # self.auth_provider, **params) - # self.interfaces_client = InterfacesClientJSON(self.auth_provider, - # **params) + self.floating_ips_client = FloatingIPsClientJSON(self.auth_provider, + **params) + self.security_groups_client = SecurityGroupsClientJSON( + self.auth_provider, **params) + self.interfaces_client = InterfacesClientJSON(self.auth_provider, + **params) # self.fixed_ips_client = FixedIPsClientJSON(self.auth_provider, # **params) # self.availability_zone_client = AvailabilityZoneClientJSON( @@ -291,8 +290,8 @@ class Manager(manager.Manager): # **params) # self.instance_usages_audit_log_client = \ # InstanceUsagesAuditLogClientJSON(self.auth_provider, **params) - # self.tenant_networks_client = \ - # TenantNetworksClientJSON(self.auth_provider, **params) + self.tenant_networks_client = \ + TenantNetworksClientJSON(self.auth_provider, **params) # self.baremetal_nodes_client = BaremetalNodesClientJSON( # self.auth_provider, **params) @@ -307,22 +306,22 @@ class Manager(manager.Manager): # self.auth_provider, default_volume_size=CONF.volume.volume_size, # **params_volume) - # def _set_database_clients(self): - # self.database_flavors_client = DatabaseFlavorsClientJSON( - # 
self.auth_provider, - # CONF.database.catalog_type, - # CONF.identity.region, - # **self.default_params_with_timeout_values) - # self.database_limits_client = DatabaseLimitsClientJSON( - # self.auth_provider, - # CONF.database.catalog_type, - # CONF.identity.region, - # **self.default_params_with_timeout_values) - # self.database_versions_client = DatabaseVersionsClientJSON( - # self.auth_provider, - # CONF.database.catalog_type, - # CONF.identity.region, - # **self.default_params_with_timeout_values) + def _set_database_clients(self): + self.database_flavors_client = DatabaseFlavorsClientJSON( + self.auth_provider, + CONF.database.catalog_type, + CONF.identity.region, + **self.default_params_with_timeout_values) + self.database_limits_client = DatabaseLimitsClientJSON( + self.auth_provider, + CONF.database.catalog_type, + CONF.identity.region, + **self.default_params_with_timeout_values) + self.database_versions_client = DatabaseVersionsClientJSON( + self.auth_provider, + CONF.database.catalog_type, + CONF.identity.region, + **self.default_params_with_timeout_values) def _set_identity_clients(self): params = { @@ -361,15 +360,15 @@ class Manager(manager.Manager): msg = 'Identity v3 API enabled, but no identity.uri_v3 set' raise exceptions.InvalidConfiguration(msg) - # def _set_volume_clients(self): - # params = { - # 'service': CONF.volume.catalog_type, - # 'region': CONF.volume.region or CONF.identity.region, - # 'endpoint_type': CONF.volume.endpoint_type, - # 'build_interval': CONF.volume.build_interval, - # 'build_timeout': CONF.volume.build_timeout - # } - # params.update(self.default_params) + def _set_volume_clients(self): + params = { + 'service': CONF.volume.catalog_type, + 'region': CONF.volume.region or CONF.identity.region, + 'endpoint_type': CONF.volume.endpoint_type, + 'build_interval': CONF.volume.build_interval, + 'build_timeout': CONF.volume.build_timeout + } + params.update(self.default_params) # self.volume_qos_client = 
QosSpecsClientJSON(self.auth_provider, # **params) @@ -380,16 +379,16 @@ class Manager(manager.Manager): # self.backups_client = BackupsClientJSON(self.auth_provider, **params) # self.backups_v2_client = BackupsClientV2JSON(self.auth_provider, # **params) - # self.snapshots_client = SnapshotsClientJSON(self.auth_provider, - # **params) - # self.snapshots_v2_client = SnapshotsV2ClientJSON(self.auth_provider, - # **params) - # self.volumes_client = VolumesClientJSON( - # self.auth_provider, default_volume_size=CONF.volume.volume_size, - # **params) - # self.volumes_v2_client = VolumesV2ClientJSON( - # self.auth_provider, default_volume_size=CONF.volume.volume_size, - # **params) + self.snapshots_client = SnapshotsClientJSON(self.auth_provider, + **params) + self.snapshots_v2_client = SnapshotsV2ClientJSON(self.auth_provider, + **params) + self.volumes_client = VolumesClientJSON( + self.auth_provider, default_volume_size=CONF.volume.volume_size, + **params) + self.volumes_v2_client = VolumesV2ClientJSON( + self.auth_provider, default_volume_size=CONF.volume.volume_size, + **params) # self.volume_types_client = VolumeTypesClientJSON(self.auth_provider, # **params) # self.volume_services_client = VolumesServicesClientJSON( diff --git a/neutron_lbaas/tests/tempest/lib/common/fake_config.py b/neutron_lbaas/tests/tempest/lib/common/fake_config.py new file mode 100644 index 000000000..22a977667 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/common/fake_config.py @@ -0,0 +1,60 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from oslo_concurrency import lockutils +from oslo_config import cfg +from oslo_config import fixture as conf_fixture + +from neutron_lbaas.tests.tempest.lib import config + + +class ConfigFixture(conf_fixture.Config): + + def __init__(self): + config.register_opts() + super(ConfigFixture, self).__init__() + + def setUp(self): + super(ConfigFixture, self).setUp() + self.conf.set_default('build_interval', 10, group='compute') + self.conf.set_default('build_timeout', 10, group='compute') + self.conf.set_default('disable_ssl_certificate_validation', True, + group='identity') + self.conf.set_default('uri', 'http://fake_uri.com/auth', + group='identity') + self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth', + group='identity') + self.conf.set_default('neutron', True, group='service_available') + self.conf.set_default('heat', True, group='service_available') + if not os.path.exists(str(os.environ.get('OS_TEST_LOCK_PATH'))): + os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH'))) + lockutils.set_defaults( + lock_path=str(os.environ.get('OS_TEST_LOCK_PATH')), + ) + self.conf.set_default('auth_version', 'v2', group='identity') + for config_option in ['username', 'password', 'tenant_name']: + # Identity group items + for prefix in ['', 'alt_', 'admin_']: + self.conf.set_default(prefix + config_option, + 'fake_' + config_option, + group='identity') + + +class FakePrivate(config.TempestConfigPrivate): + def __init__(self, parse_conf=True, config_path=None): + cfg.CONF([], default_config_files=[]) + self._set_attrs() + self.lock_path = cfg.CONF.lock_path diff --git a/neutron_lbaas/tests/tempest/lib/common/utils/linux/__init__.py b/neutron_lbaas/tests/tempest/lib/common/utils/linux/__init__.py new file mode 100644 index 000000000..04d898dfe --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/common/utils/linux/__init__.py @@ -0,0 +1,3 @@ 
+PING_IPV4_COMMAND = 'ping -c 3 ' +PING_IPV6_COMMAND = 'ping6 -c 3 ' +PING_PACKET_LOSS_REGEX = '(\d{1,3})\.?\d*\% packet loss' diff --git a/neutron_lbaas/tests/tempest/lib/common/utils/linux/remote_client.py b/neutron_lbaas/tests/tempest/lib/common/utils/linux/remote_client.py new file mode 100644 index 000000000..a7678cffa --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/common/utils/linux/remote_client.py @@ -0,0 +1,170 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +import re +import time + +import six + +from neutron_lbaas.tests.tempest.lib import config +from neutron_lbaas.tests.tempest.lib import exceptions +from neutron_lbaas.tests.tempest.lib.common import ssh + +CONF = config.CONF + + +class RemoteClient(object): + + # NOTE(afazekas): It should always get an address instead of server + def __init__(self, server, username, password=None, pkey=None): + ssh_timeout = CONF.compute.ssh_timeout + network = CONF.compute.network_for_ssh + ip_version = CONF.compute.ip_version_for_ssh + ssh_channel_timeout = CONF.compute.ssh_channel_timeout + if isinstance(server, six.string_types): + ip_address = server + else: + addresses = server['addresses'][network] + for address in addresses: + if address['version'] == ip_version: + ip_address = address['addr'] + break + else: + raise exceptions.ServerUnreachable() + self.ssh_client = ssh.Client(ip_address, username, password, + ssh_timeout, pkey=pkey, + channel_timeout=ssh_channel_timeout) + + def 
exec_command(self, cmd): + # Shell options below add more clearness on failures, + # path is extended for some non-cirros guest oses (centos7) + cmd = "set -eu -o pipefail; PATH=$PATH:/sbin; " + cmd + return self.ssh_client.exec_command(cmd) + + def validate_authentication(self): + """Validate ssh connection and authentication + This method raises an Exception when the validation fails. + """ + self.ssh_client.test_connection_auth() + + def hostname_equals_servername(self, expected_hostname): + # Get host name using command "hostname" + actual_hostname = self.exec_command("hostname").rstrip() + return expected_hostname == actual_hostname + + def get_ram_size_in_mb(self): + output = self.exec_command('free -m | grep Mem') + if output: + return output.split()[1] + + def get_number_of_vcpus(self): + command = 'cat /proc/cpuinfo | grep processor | wc -l' + output = self.exec_command(command) + return int(output) + + def get_partitions(self): + # Return the contents of /proc/partitions + command = 'cat /proc/partitions' + output = self.exec_command(command) + return output + + def get_boot_time(self): + cmd = 'cut -f1 -d. 
/proc/uptime' + boot_secs = self.exec_command(cmd) + boot_time = time.time() - int(boot_secs) + return time.localtime(boot_time) + + def write_to_console(self, message): + message = re.sub("([$\\`])", "\\\\\\\\\\1", message) + # usually to /dev/ttyS0 + cmd = 'sudo sh -c "echo \\"%s\\" >/dev/console"' % message + return self.exec_command(cmd) + + def ping_host(self, host, count=CONF.compute.ping_count, + size=CONF.compute.ping_size): + addr = netaddr.IPAddress(host) + cmd = 'ping6' if addr.version == 6 else 'ping' + cmd += ' -c{0} -w{0} -s{1} {2}'.format(count, size, host) + return self.exec_command(cmd) + + def get_mac_address(self): + cmd = "ip addr | awk '/ether/ {print $2}'" + return self.exec_command(cmd) + + def get_nic_name(self, address): + cmd = "ip -o addr | awk '/%s/ {print $2}'" % address + return self.exec_command(cmd) + + def get_ip_list(self): + cmd = "ip address" + return self.exec_command(cmd) + + def assign_static_ip(self, nic, addr): + cmd = "sudo ip addr add {ip}/{mask} dev {nic}".format( + ip=addr, mask=CONF.network.tenant_network_mask_bits, + nic=nic + ) + return self.exec_command(cmd) + + def turn_nic_on(self, nic): + cmd = "sudo ip link set {nic} up".format(nic=nic) + return self.exec_command(cmd) + + def get_pids(self, pr_name): + # Get pid(s) of a process/program + cmd = "ps -ef | grep %s | grep -v 'grep' | awk {'print $1'}" % pr_name + return self.exec_command(cmd).split('\n') + + def get_dns_servers(self): + cmd = 'cat /etc/resolv.conf' + resolve_file = self.exec_command(cmd).strip().split('\n') + entries = (l.split() for l in resolve_file) + dns_servers = [l[1] for l in entries + if len(l) and l[0] == 'nameserver'] + return dns_servers + + def send_signal(self, pid, signum): + cmd = 'sudo /bin/kill -{sig} {pid}'.format(pid=pid, sig=signum) + return self.exec_command(cmd) + + def _renew_lease_udhcpc(self, fixed_ip=None): + """Renews DHCP lease via udhcpc client. """ + file_path = '/var/run/udhcpc.' 
+ nic_name = self.get_nic_name(fixed_ip) + nic_name = nic_name.strip().lower() + pid = self.exec_command('cat {path}{nic}.pid'. + format(path=file_path, nic=nic_name)) + pid = pid.strip() + self.send_signal(pid, 'USR1') + + def _renew_lease_dhclient(self, fixed_ip=None): + """Renews DHCP lease via dhclient client. """ + cmd = "sudo /sbin/dhclient -r && sudo /sbin/dhclient" + self.exec_command(cmd) + + def renew_lease(self, fixed_ip=None): + """Wrapper method for renewing DHCP lease via given client + + Supporting: + * udhcpc + * dhclient + """ + # TODO(yfried): add support for dhcpcd + suported_clients = ['udhcpc', 'dhclient'] + dhcp_client = CONF.scenario.dhcp_client + if dhcp_client not in suported_clients: + raise exceptions.InvalidConfiguration('%s DHCP client unsupported' + % dhcp_client) + if dhcp_client == 'udhcpc' and not fixed_ip: + raise ValueError("need to set 'fixed_ip' for udhcpc client") + return getattr(self, '_renew_lease_' + dhcp_client)(fixed_ip=fixed_ip) diff --git a/neutron_lbaas/tests/tempest/lib/services/compute/__init__.py b/neutron_lbaas/tests/tempest/lib/services/compute/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/compute/__init__.py @@ -0,0 +1 @@ + diff --git a/neutron_lbaas/tests/tempest/lib/services/compute/json/__init__.py b/neutron_lbaas/tests/tempest/lib/services/compute/json/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/compute/json/__init__.py @@ -0,0 +1 @@ + diff --git a/neutron_lbaas/tests/tempest/lib/services/compute/json/flavors_client.py b/neutron_lbaas/tests/tempest/lib/services/compute/json/flavors_client.py new file mode 100644 index 000000000..5eb3b765b --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/compute/json/flavors_client.py @@ -0,0 +1,178 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from six.moves.urllib import parse as urllib + +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 import flavors as schema +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 import flavors_access as schema_access +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 import flavors_extra_specs as schema_extra_specs +from neutron_lbaas.tests.tempest.lib.common import service_client + + +class FlavorsClientJSON(service_client.ServiceClient): + + def list_flavors(self, params=None): + url = 'flavors' + if params: + url += '?%s' % urllib.urlencode(params) + + resp, body = self.get(url) + body = json.loads(body) + self.validate_response(schema.list_flavors, resp, body) + return service_client.ResponseBodyList(resp, body['flavors']) + + def list_flavors_with_detail(self, params=None): + url = 'flavors/detail' + if params: + url += '?%s' % urllib.urlencode(params) + + resp, body = self.get(url) + body = json.loads(body) + self.validate_response(schema.list_flavors_details, resp, body) + return service_client.ResponseBodyList(resp, body['flavors']) + + def show_flavor(self, flavor_id): + resp, body = self.get("flavors/%s" % str(flavor_id)) + body = json.loads(body) + self.validate_response(schema.create_get_flavor_details, resp, body) + return service_client.ResponseBody(resp, body['flavor']) + + def create_flavor(self, name, ram, vcpus, disk, flavor_id, 
**kwargs): + """Creates a new flavor or instance type.""" + post_body = { + 'name': name, + 'ram': ram, + 'vcpus': vcpus, + 'disk': disk, + 'id': flavor_id, + } + if kwargs.get('ephemeral'): + post_body['OS-FLV-EXT-DATA:ephemeral'] = kwargs.get('ephemeral') + if kwargs.get('swap'): + post_body['swap'] = kwargs.get('swap') + if kwargs.get('rxtx'): + post_body['rxtx_factor'] = kwargs.get('rxtx') + if kwargs.get('is_public'): + post_body['os-flavor-access:is_public'] = kwargs.get('is_public') + post_body = json.dumps({'flavor': post_body}) + resp, body = self.post('flavors', post_body) + + body = json.loads(body) + self.validate_response(schema.create_get_flavor_details, resp, body) + return service_client.ResponseBody(resp, body['flavor']) + + def delete_flavor(self, flavor_id): + """Deletes the given flavor.""" + resp, body = self.delete("flavors/{0}".format(flavor_id)) + self.validate_response(schema.delete_flavor, resp, body) + return service_client.ResponseBody(resp, body) + + def is_resource_deleted(self, id): + # Did not use show_flavor(id) for verification as it gives + # 200 ok even for deleted id. 
LP #981263 + # we can remove the loop here and use get by ID when bug gets sortedout + flavors = self.list_flavors_with_detail() + for flavor in flavors: + if flavor['id'] == id: + return False + return True + + @property + def resource_type(self): + """Returns the primary type of resource this client works with.""" + return 'flavor' + + def set_flavor_extra_spec(self, flavor_id, specs): + """Sets extra Specs to the mentioned flavor.""" + post_body = json.dumps({'extra_specs': specs}) + resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id, + post_body) + body = json.loads(body) + self.validate_response(schema_extra_specs.set_get_flavor_extra_specs, + resp, body) + return service_client.ResponseBody(resp, body['extra_specs']) + + def list_flavor_extra_specs(self, flavor_id): + """Gets extra Specs details of the mentioned flavor.""" + resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id) + body = json.loads(body) + self.validate_response(schema_extra_specs.set_get_flavor_extra_specs, + resp, body) + return service_client.ResponseBody(resp, body['extra_specs']) + + def show_flavor_extra_spec(self, flavor_id, key): + """Gets extra Specs key-value of the mentioned flavor and key.""" + resp, body = self.get('flavors/%s/os-extra_specs/%s' % (str(flavor_id), + key)) + body = json.loads(body) + self.validate_response( + schema_extra_specs.set_get_flavor_extra_specs_key, + resp, body) + return service_client.ResponseBody(resp, body) + + def update_flavor_extra_spec(self, flavor_id, key, **kwargs): + """Update specified extra Specs of the mentioned flavor and key.""" + resp, body = self.put('flavors/%s/os-extra_specs/%s' % + (flavor_id, key), json.dumps(kwargs)) + body = json.loads(body) + self.validate_response( + schema_extra_specs.set_get_flavor_extra_specs_key, + resp, body) + return service_client.ResponseBody(resp, body) + + def unset_flavor_extra_spec(self, flavor_id, key): + """Unsets extra Specs from the mentioned flavor.""" + resp, body = 
self.delete('flavors/%s/os-extra_specs/%s' % + (str(flavor_id), key)) + self.validate_response(schema.unset_flavor_extra_specs, resp, body) + return service_client.ResponseBody(resp, body) + + def list_flavor_access(self, flavor_id): + """Gets flavor access information given the flavor id.""" + resp, body = self.get('flavors/%s/os-flavor-access' % flavor_id) + body = json.loads(body) + self.validate_response(schema_access.add_remove_list_flavor_access, + resp, body) + return service_client.ResponseBodyList(resp, body['flavor_access']) + + def add_flavor_access(self, flavor_id, tenant_id): + """Add flavor access for the specified tenant.""" + post_body = { + 'addTenantAccess': { + 'tenant': tenant_id + } + } + post_body = json.dumps(post_body) + resp, body = self.post('flavors/%s/action' % flavor_id, post_body) + body = json.loads(body) + self.validate_response(schema_access.add_remove_list_flavor_access, + resp, body) + return service_client.ResponseBodyList(resp, body['flavor_access']) + + def remove_flavor_access(self, flavor_id, tenant_id): + """Remove flavor access from the specified tenant.""" + post_body = { + 'removeTenantAccess': { + 'tenant': tenant_id + } + } + post_body = json.dumps(post_body) + resp, body = self.post('flavors/%s/action' % flavor_id, post_body) + body = json.loads(body) + self.validate_response(schema_access.add_remove_list_flavor_access, + resp, body) + return service_client.ResponseBody(resp, body['flavor_access']) diff --git a/neutron_lbaas/tests/tempest/lib/services/compute/json/floating_ips_client.py b/neutron_lbaas/tests/tempest/lib/services/compute/json/floating_ips_client.py new file mode 100644 index 000000000..9873b4359 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/compute/json/floating_ips_client.py @@ -0,0 +1,142 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from six.moves.urllib import parse as urllib +from tempest_lib import exceptions as lib_exc + +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 import floating_ips as schema +from neutron_lbaas.tests.tempest.lib.common import service_client + + +class FloatingIPsClientJSON(service_client.ServiceClient): + + def list_floating_ips(self, params=None): + """Returns a list of all floating IPs filtered by any parameters.""" + url = 'os-floating-ips' + if params: + url += '?%s' % urllib.urlencode(params) + + resp, body = self.get(url) + body = json.loads(body) + self.validate_response(schema.list_floating_ips, resp, body) + return service_client.ResponseBodyList(resp, body['floating_ips']) + + def show_floating_ip(self, floating_ip_id): + """Get the details of a floating IP.""" + url = "os-floating-ips/%s" % str(floating_ip_id) + resp, body = self.get(url) + body = json.loads(body) + self.validate_response(schema.create_get_floating_ip, resp, body) + return service_client.ResponseBody(resp, body['floating_ip']) + + def create_floating_ip(self, pool_name=None): + """Allocate a floating IP to the project.""" + url = 'os-floating-ips' + post_body = {'pool': pool_name} + post_body = json.dumps(post_body) + resp, body = self.post(url, post_body) + body = json.loads(body) + self.validate_response(schema.create_get_floating_ip, resp, body) + return service_client.ResponseBody(resp, 
body['floating_ip']) + + def delete_floating_ip(self, floating_ip_id): + """Deletes the provided floating IP from the project.""" + url = "os-floating-ips/%s" % str(floating_ip_id) + resp, body = self.delete(url) + self.validate_response(schema.add_remove_floating_ip, resp, body) + return service_client.ResponseBody(resp, body) + + def associate_floating_ip_to_server(self, floating_ip, server_id): + """Associate the provided floating IP to a specific server.""" + url = "servers/%s/action" % str(server_id) + post_body = { + 'addFloatingIp': { + 'address': floating_ip, + } + } + + post_body = json.dumps(post_body) + resp, body = self.post(url, post_body) + self.validate_response(schema.add_remove_floating_ip, resp, body) + return service_client.ResponseBody(resp, body) + + def disassociate_floating_ip_from_server(self, floating_ip, server_id): + """Disassociate the provided floating IP from a specific server.""" + url = "servers/%s/action" % str(server_id) + post_body = { + 'removeFloatingIp': { + 'address': floating_ip, + } + } + + post_body = json.dumps(post_body) + resp, body = self.post(url, post_body) + self.validate_response(schema.add_remove_floating_ip, resp, body) + return service_client.ResponseBody(resp, body) + + def is_resource_deleted(self, id): + try: + self.show_floating_ip(id) + except lib_exc.NotFound: + return True + return False + + @property + def resource_type(self): + """Returns the primary type of resource this client works with.""" + return 'floating_ip' + + def list_floating_ip_pools(self, params=None): + """Returns a list of all floating IP Pools.""" + url = 'os-floating-ip-pools' + if params: + url += '?%s' % urllib.urlencode(params) + + resp, body = self.get(url) + body = json.loads(body) + self.validate_response(schema.list_floating_ip_pools, resp, body) + return service_client.ResponseBodyList(resp, body['floating_ip_pools']) + + def create_floating_ips_bulk(self, ip_range, pool, interface): + """Allocate floating IPs in bulk.""" + 
post_body = { + 'ip_range': ip_range, + 'pool': pool, + 'interface': interface + } + post_body = json.dumps({'floating_ips_bulk_create': post_body}) + resp, body = self.post('os-floating-ips-bulk', post_body) + body = json.loads(body) + self.validate_response(schema.create_floating_ips_bulk, resp, body) + return service_client.ResponseBody(resp, + body['floating_ips_bulk_create']) + + def list_floating_ips_bulk(self): + """Returns a list of all floating IPs bulk.""" + resp, body = self.get('os-floating-ips-bulk') + body = json.loads(body) + self.validate_response(schema.list_floating_ips_bulk, resp, body) + return service_client.ResponseBodyList(resp, body['floating_ip_info']) + + def delete_floating_ips_bulk(self, ip_range): + """Deletes the provided floating IPs bulk.""" + post_body = json.dumps({'ip_range': ip_range}) + resp, body = self.put('os-floating-ips-bulk/delete', post_body) + body = json.loads(body) + self.validate_response(schema.delete_floating_ips_bulk, resp, body) + data = body['floating_ips_bulk_delete'] + return service_client.ResponseBodyData(resp, data) diff --git a/neutron_lbaas/tests/tempest/lib/services/compute/json/images_client.py b/neutron_lbaas/tests/tempest/lib/services/compute/json/images_client.py new file mode 100644 index 000000000..bb8e42820 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/compute/json/images_client.py @@ -0,0 +1,142 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from six.moves.urllib import parse as urllib +from tempest_lib import exceptions as lib_exc + +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 import images as schema +from neutron_lbaas.tests.tempest.lib.common import service_client +from neutron_lbaas.tests.tempest.lib.common import waiters + + +class ImagesClientJSON(service_client.ServiceClient): + + def create_image(self, server_id, name, meta=None): + """Creates an image of the original server.""" + + post_body = { + 'createImage': { + 'name': name, + } + } + + if meta is not None: + post_body['createImage']['metadata'] = meta + + post_body = json.dumps(post_body) + resp, body = self.post('servers/%s/action' % str(server_id), + post_body) + self.validate_response(schema.create_image, resp, body) + return service_client.ResponseBody(resp, body) + + def list_images(self, params=None): + """Returns a list of all images filtered by any parameters.""" + url = 'images' + if params: + url += '?%s' % urllib.urlencode(params) + + resp, body = self.get(url) + body = json.loads(body) + self.validate_response(schema.list_images, resp, body) + return service_client.ResponseBodyList(resp, body['images']) + + def list_images_with_detail(self, params=None): + """Returns a detailed list of images filtered by any parameters.""" + url = 'images/detail' + if params: + url += '?%s' % urllib.urlencode(params) + + resp, body = self.get(url) + body = json.loads(body) + self.validate_response(schema.list_images_details, resp, body) + return service_client.ResponseBodyList(resp, body['images']) + + def show_image(self, image_id): + """Returns the details of a single image.""" + resp, body = self.get("images/%s" % str(image_id)) + self.expected_success(200, resp.status) + body = json.loads(body) + self.validate_response(schema.get_image, resp, body) + return service_client.ResponseBody(resp, 
body['image']) + + def delete_image(self, image_id): + """Deletes the provided image.""" + resp, body = self.delete("images/%s" % str(image_id)) + self.validate_response(schema.delete, resp, body) + return service_client.ResponseBody(resp, body) + + def wait_for_image_status(self, image_id, status): + """Waits for an image to reach a given status.""" + waiters.wait_for_image_status(self, image_id, status) + + def list_image_metadata(self, image_id): + """Lists all metadata items for an image.""" + resp, body = self.get("images/%s/metadata" % str(image_id)) + body = json.loads(body) + self.validate_response(schema.image_metadata, resp, body) + return service_client.ResponseBody(resp, body['metadata']) + + def set_image_metadata(self, image_id, meta): + """Sets the metadata for an image.""" + post_body = json.dumps({'metadata': meta}) + resp, body = self.put('images/%s/metadata' % str(image_id), post_body) + body = json.loads(body) + self.validate_response(schema.image_metadata, resp, body) + return service_client.ResponseBody(resp, body['metadata']) + + def update_image_metadata(self, image_id, meta): + """Updates the metadata for an image.""" + post_body = json.dumps({'metadata': meta}) + resp, body = self.post('images/%s/metadata' % str(image_id), post_body) + body = json.loads(body) + self.validate_response(schema.image_metadata, resp, body) + return service_client.ResponseBody(resp, body['metadata']) + + def get_image_metadata_item(self, image_id, key): + """Returns the value for a specific image metadata key.""" + resp, body = self.get("images/%s/metadata/%s" % (str(image_id), key)) + body = json.loads(body) + self.validate_response(schema.image_meta_item, resp, body) + return service_client.ResponseBody(resp, body['meta']) + + def set_image_metadata_item(self, image_id, key, meta): + """Sets the value for a specific image metadata key.""" + post_body = json.dumps({'meta': meta}) + resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key), + 
post_body) + body = json.loads(body) + self.validate_response(schema.image_meta_item, resp, body) + return service_client.ResponseBody(resp, body['meta']) + + def delete_image_metadata_item(self, image_id, key): + """Deletes a single image metadata key/value pair.""" + resp, body = self.delete("images/%s/metadata/%s" % + (str(image_id), key)) + self.validate_response(schema.delete, resp, body) + return service_client.ResponseBody(resp, body) + + def is_resource_deleted(self, id): + try: + self.show_image(id) + except lib_exc.NotFound: + return True + return False + + @property + def resource_type(self): + """Returns the primary type of resource this client works with.""" + return 'image' diff --git a/neutron_lbaas/tests/tempest/lib/services/compute/json/interfaces_client.py b/neutron_lbaas/tests/tempest/lib/services/compute/json/interfaces_client.py new file mode 100644 index 000000000..b7adcdef6 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/compute/json/interfaces_client.py @@ -0,0 +1,109 @@ +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import time + +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 import interfaces as schema +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 import servers as servers_schema +from neutron_lbaas.tests.tempest.lib.common import service_client +from neutron_lbaas.tests.tempest.lib import exceptions + + +class InterfacesClientJSON(service_client.ServiceClient): + + def list_interfaces(self, server): + resp, body = self.get('servers/%s/os-interface' % server) + body = json.loads(body) + self.validate_response(schema.list_interfaces, resp, body) + return service_client.ResponseBodyList(resp, + body['interfaceAttachments']) + + def create_interface(self, server, port_id=None, network_id=None, + fixed_ip=None): + post_body = dict(interfaceAttachment=dict()) + if port_id: + post_body['interfaceAttachment']['port_id'] = port_id + if network_id: + post_body['interfaceAttachment']['net_id'] = network_id + if fixed_ip: + fip = dict(ip_address=fixed_ip) + post_body['interfaceAttachment']['fixed_ips'] = [fip] + post_body = json.dumps(post_body) + resp, body = self.post('servers/%s/os-interface' % server, + body=post_body) + body = json.loads(body) + self.validate_response(schema.get_create_interfaces, resp, body) + return service_client.ResponseBody(resp, body['interfaceAttachment']) + + def show_interface(self, server, port_id): + resp, body = self.get('servers/%s/os-interface/%s' % (server, port_id)) + body = json.loads(body) + self.validate_response(schema.get_create_interfaces, resp, body) + return service_client.ResponseBody(resp, body['interfaceAttachment']) + + def delete_interface(self, server, port_id): + resp, body = self.delete('servers/%s/os-interface/%s' % (server, + port_id)) + self.validate_response(schema.delete_interface, resp, body) + return service_client.ResponseBody(resp, body) + + def wait_for_interface_status(self, server, port_id, status): + """Waits for a interface to reach a given status.""" + 
body = self.show_interface(server, port_id) + interface_status = body['port_state'] + start = int(time.time()) + + while(interface_status != status): + time.sleep(self.build_interval) + body = self.show_interface(server, port_id) + interface_status = body['port_state'] + + timed_out = int(time.time()) - start >= self.build_timeout + + if interface_status != status and timed_out: + message = ('Interface %s failed to reach %s status ' + '(current %s) within the required time (%s s).' % + (port_id, status, interface_status, + self.build_timeout)) + raise exceptions.TimeoutException(message) + + return body + + def add_fixed_ip(self, server_id, network_id): + """Add a fixed IP to input server instance.""" + post_body = json.dumps({ + 'addFixedIp': { + 'networkId': network_id + } + }) + resp, body = self.post('servers/%s/action' % str(server_id), + post_body) + self.validate_response(servers_schema.server_actions_common_schema, + resp, body) + return service_client.ResponseBody(resp, body) + + def remove_fixed_ip(self, server_id, ip_address): + """Remove input fixed IP from input server instance.""" + post_body = json.dumps({ + 'removeFixedIp': { + 'address': ip_address + } + }) + resp, body = self.post('servers/%s/action' % str(server_id), + post_body) + self.validate_response(servers_schema.server_actions_common_schema, + resp, body) + return service_client.ResponseBody(resp, body) diff --git a/neutron_lbaas/tests/tempest/lib/services/compute/json/keypairs_client.py b/neutron_lbaas/tests/tempest/lib/services/compute/json/keypairs_client.py new file mode 100644 index 000000000..9f22cbd6b --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/compute/json/keypairs_client.py @@ -0,0 +1,54 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from neutron_lbaas.tests.tempest.lib.api_schema.response.compute.v2_1 import keypairs as schema +from neutron_lbaas.tests.tempest.lib.common import service_client + + +class KeyPairsClientJSON(service_client.ServiceClient): + + def list_keypairs(self): + resp, body = self.get("os-keypairs") + body = json.loads(body) + # Each returned keypair is embedded within an unnecessary 'keypair' + # element which is a deviation from other resources like floating-ips, + # servers, etc. A bug? + # For now we shall adhere to the spec, but the spec for keypairs + # is yet to be found + self.validate_response(schema.list_keypairs, resp, body) + return service_client.ResponseBodyList(resp, body['keypairs']) + + def get_keypair(self, key_name): + resp, body = self.get("os-keypairs/%s" % str(key_name)) + body = json.loads(body) + self.validate_response(schema.get_keypair, resp, body) + return service_client.ResponseBody(resp, body['keypair']) + + def create_keypair(self, name, pub_key=None): + post_body = {'keypair': {'name': name}} + if pub_key: + post_body['keypair']['public_key'] = pub_key + post_body = json.dumps(post_body) + resp, body = self.post("os-keypairs", body=post_body) + body = json.loads(body) + self.validate_response(schema.create_keypair, resp, body) + return service_client.ResponseBody(resp, body['keypair']) + + def delete_keypair(self, key_name): + resp, body = self.delete("os-keypairs/%s" % str(key_name)) + self.validate_response(schema.delete_keypair, resp, body) + return service_client.ResponseBody(resp, body) diff --git 
a/neutron_lbaas/tests/tempest/lib/services/compute/json/networks_client.py b/neutron_lbaas/tests/tempest/lib/services/compute/json/networks_client.py new file mode 100644 index 000000000..1551520aa --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/compute/json/networks_client.py @@ -0,0 +1,37 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from neutron_lbaas.tests.tempest.lib.common import service_client + + +class NetworksClientJSON(service_client.ServiceClient): + + def list_networks(self, name=None): + resp, body = self.get("os-networks") + body = json.loads(body) + self.expected_success(200, resp.status) + if name: + networks = [n for n in body['networks'] if n['label'] == name] + else: + networks = body['networks'] + return service_client.ResponseBodyList(resp, networks) + + def get_network(self, network_id): + resp, body = self.get("os-networks/%s" % str(network_id)) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['network']) diff --git a/neutron_lbaas/tests/tempest/lib/services/compute/json/security_groups_client.py b/neutron_lbaas/tests/tempest/lib/services/compute/json/security_groups_client.py new file mode 100644 index 000000000..cc273434c --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/compute/json/security_groups_client.py @@ -0,0 +1,144 @@ +# Copyright 2012 OpenStack Foundation +# 
class SecurityGroupsClientJSON(service_client.ServiceClient):
    """JSON client for the Nova os-security-groups API."""

    def list_security_groups(self, params=None):
        """List all security groups for a user."""
        url = 'os-security-groups'
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, raw = self.get(url)
        parsed = json.loads(raw)
        self.validate_response(schema.list_security_groups, resp, parsed)
        return service_client.ResponseBodyList(resp, parsed['security_groups'])

    def get_security_group(self, security_group_id):
        """Get the details of a Security Group."""
        resp, raw = self.get(
            "os-security-groups/%s" % str(security_group_id))
        parsed = json.loads(raw)
        self.validate_response(schema.get_security_group, resp, parsed)
        return service_client.ResponseBody(resp, parsed['security_group'])

    def create_security_group(self, name, description):
        """Create a new security group.

        name (Required): Name of security group.
        description (Required): Description of security group.
        """
        group = {
            'name': name,
            'description': description,
        }
        resp, raw = self.post('os-security-groups',
                              json.dumps({'security_group': group}))
        parsed = json.loads(raw)
        # The create response has the same shape as a GET response, so the
        # GET schema is reused for validation.
        self.validate_response(schema.get_security_group, resp, parsed)
        return service_client.ResponseBody(resp, parsed['security_group'])

    def update_security_group(self, security_group_id, name=None,
                              description=None):
        """Update a security group.

        security_group_id: a security_group to update
        name: new name of security group
        description: new description of security group
        """
        group = {}
        if name:
            group['name'] = name
        if description:
            group['description'] = description
        resp, raw = self.put(
            'os-security-groups/%s' % str(security_group_id),
            json.dumps({'security_group': group}))
        parsed = json.loads(raw)
        self.validate_response(schema.update_security_group, resp, parsed)
        return service_client.ResponseBody(resp, parsed['security_group'])

    def delete_security_group(self, security_group_id):
        """Deletes the provided Security Group."""
        resp, raw = self.delete(
            'os-security-groups/%s' % str(security_group_id))
        self.validate_response(schema.delete_security_group, resp, raw)
        return service_client.ResponseBody(resp, raw)

    def create_security_group_rule(self, parent_group_id, ip_proto, from_port,
                                   to_port, **kwargs):
        """Create a new security group rule.

        parent_group_id: ID of Security group
        ip_proto: ip protocol (icmp, tcp, udp).
        from_port: Port at start of range.
        to_port: Port at end of range.
        Optional keyword arguments:
        cidr: CIDR for address range.
        group_id: ID of the Source group
        """
        rule = {
            'parent_group_id': parent_group_id,
            'ip_protocol': ip_proto,
            'from_port': from_port,
            'to_port': to_port,
            'cidr': kwargs.get('cidr'),
            'group_id': kwargs.get('group_id'),
        }
        resp, raw = self.post('os-security-group-rules',
                              json.dumps({'security_group_rule': rule}))
        parsed = json.loads(raw)
        self.validate_response(schema.create_security_group_rule, resp, parsed)
        return service_client.ResponseBody(resp, parsed['security_group_rule'])

    def delete_security_group_rule(self, group_rule_id):
        """Deletes the provided Security Group rule."""
        resp, raw = self.delete(
            'os-security-group-rules/%s' % str(group_rule_id))
        self.validate_response(schema.delete_security_group_rule, resp, raw)
        return service_client.ResponseBody(resp, raw)

    def list_security_group_rules(self, security_group_id):
        """List all rules for a security group."""
        # There is no per-group rules endpoint, so fetch every group and
        # pick out the matching one.
        resp, raw = self.get('os-security-groups')
        parsed = json.loads(raw)
        self.validate_response(schema.list_security_groups, resp, parsed)
        for group in parsed['security_groups']:
            if group['id'] == security_group_id:
                return service_client.ResponseBodyList(resp, group['rules'])
        raise lib_exc.NotFound('No such Security Group')

    def is_resource_deleted(self, id):
        # A 404 from the GET is taken as proof of deletion.
        try:
            self.get_security_group(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'security_group'
class ServersClientJSON(service_client.ServiceClient):
    """JSON client for the Nova servers API and server actions."""

    def __init__(self, auth_provider, service, region,
                 enable_instance_password=True, **kwargs):
        super(ServersClientJSON, self).__init__(
            auth_provider, service, region, **kwargs)
        # When True, create/rebuild responses are validated against the
        # schema variant that includes 'adminPass'.
        self.enable_instance_password = enable_instance_password

    def create_server(self, name, image_ref, flavor_ref, **kwargs):
        """
        Creates an instance of a server.
        name (Required): The name of the server.
        image_ref (Required): Reference to the image used to build the server.
        flavor_ref (Required): The flavor used to build the server.
        Following optional keyword arguments are accepted:
        adminPass: Sets the initial root password.
        key_name: Key name of keypair that was created earlier.
        meta: A dictionary of values to be used as metadata.
        personality: A list of dictionaries for files to be injected into
        the server.
        security_groups: A list of security group dicts.
        networks: A list of network dicts with UUID and fixed_ip.
        user_data: User data for instance.
        availability_zone: Availability zone in which to launch instance.
        accessIPv4: The IPv4 access address for the server.
        accessIPv6: The IPv6 access address for the server.
        min_count: Count of minimum number of instances to launch.
        max_count: Count of maximum number of instances to launch.
        disk_config: Determines if user or admin controls disk configuration.
        return_reservation_id: Enable/Disable the return of reservation id
        block_device_mapping: Block device mapping for the server.
        block_device_mapping_v2: Block device mapping V2 for the server.
        """
        post_body = {
            'name': name,
            'imageRef': image_ref,
            'flavorRef': flavor_ref
        }

        # Tuple entries map a request-body key to a differently named kwarg.
        for option in ['personality', 'adminPass', 'key_name',
                       'security_groups', 'networks', 'user_data',
                       'availability_zone', 'accessIPv4', 'accessIPv6',
                       'min_count', 'max_count', ('metadata', 'meta'),
                       ('OS-DCF:diskConfig', 'disk_config'),
                       'return_reservation_id', 'block_device_mapping',
                       'block_device_mapping_v2']:
            if isinstance(option, tuple):
                post_param = option[0]
                key = option[1]
            else:
                post_param = option
                key = option
            value = kwargs.get(key)
            if value is not None:
                post_body[post_param] = value

        post_body = {'server': post_body}

        if 'sched_hints' in kwargs:
            hints = {'os:scheduler_hints': kwargs.get('sched_hints')}
            # NOTE: the previous form, dict(post_body.items() +
            # hints.items()), only works on Python 2; on Python 3 dict
            # views do not support '+'.  update() is equivalent on both.
            post_body.update(hints)
        post_body = json.dumps(post_body)
        resp, body = self.post('servers', post_body)

        body = json.loads(body)
        # NOTE(maurosr): this deals with the case of multiple server create
        # with return reservation id set True
        if 'reservation_id' in body:
            return service_client.ResponseBody(resp, body)
        if self.enable_instance_password:
            create_schema = schema.create_server_with_admin_pass
        else:
            create_schema = schema.create_server
        self.validate_response(create_schema, resp, body)
        return service_client.ResponseBody(resp, body['server'])

    def update_server(self, server_id, name=None, meta=None, accessIPv4=None,
                      accessIPv6=None, disk_config=None):
        """
        Updates the properties of an existing server.
        server_id: The id of an existing server.
        name: The name of the server.
        meta: A dictionary of values to be used as server metadata.
        accessIPv4: The IPv4 access address for the server.
        accessIPv6: The IPv6 access address for the server.
        disk_config: Determines if user or admin controls disk configuration.
        """

        post_body = {}

        if meta is not None:
            post_body['metadata'] = meta

        if name is not None:
            post_body['name'] = name

        if accessIPv4 is not None:
            post_body['accessIPv4'] = accessIPv4

        if accessIPv6 is not None:
            post_body['accessIPv6'] = accessIPv6

        if disk_config is not None:
            post_body['OS-DCF:diskConfig'] = disk_config

        post_body = json.dumps({'server': post_body})
        resp, body = self.put("servers/%s" % str(server_id), post_body)
        body = json.loads(body)
        self.validate_response(schema.update_server, resp, body)
        return service_client.ResponseBody(resp, body['server'])

    def get_server(self, server_id):
        """Returns the details of an existing server."""
        resp, body = self.get("servers/%s" % str(server_id))
        body = json.loads(body)
        self.validate_response(schema.get_server, resp, body)
        return service_client.ResponseBody(resp, body['server'])

    def delete_server(self, server_id):
        """Deletes the given server."""
        resp, body = self.delete("servers/%s" % str(server_id))
        self.validate_response(schema.delete_server, resp, body)
        return service_client.ResponseBody(resp, body)

    def list_servers(self, params=None):
        """Lists all servers for a user."""

        url = 'servers'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_servers, resp, body)
        return service_client.ResponseBody(resp, body)

    def list_servers_with_detail(self, params=None):
        """Lists all servers in detail for a user."""

        url = 'servers/detail'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_servers_detail, resp, body)
        return service_client.ResponseBody(resp, body)

    def wait_for_server_status(self, server_id, status, extra_timeout=0,
                               raise_on_error=True, ready_wait=True):
        """Waits for a server to reach a given status."""
        return waiters.wait_for_server_status(self, server_id, status,
                                              extra_timeout=extra_timeout,
                                              raise_on_error=raise_on_error,
                                              ready_wait=ready_wait)

    def wait_for_server_termination(self, server_id, ignore_error=False):
        """Waits for server to reach termination (404 from GET).

        ignore_error: when False, an ERROR server status raises
        BuildErrorException instead of waiting out the timeout.
        """
        start_time = int(time.time())
        while True:
            try:
                body = self.get_server(server_id)
            except lib_exc.NotFound:
                # Server is gone - termination complete.
                return

            server_status = body['status']
            if server_status == 'ERROR' and not ignore_error:
                raise exceptions.BuildErrorException(server_id=server_id)

            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException

            time.sleep(self.build_interval)

    def list_addresses(self, server_id):
        """Lists all addresses for a server."""
        resp, body = self.get("servers/%s/ips" % str(server_id))
        body = json.loads(body)
        self.validate_response(schema.list_addresses, resp, body)
        return service_client.ResponseBody(resp, body['addresses'])

    def list_addresses_by_network(self, server_id, network_id):
        """Lists all addresses of a specific network type for a server."""
        resp, body = self.get("servers/%s/ips/%s" %
                              (str(server_id), network_id))
        body = json.loads(body)
        self.validate_response(schema.list_addresses_by_network, resp, body)
        return service_client.ResponseBody(resp, body)

    def action(self, server_id, action_name, response_key,
               schema=schema.server_actions_common_schema,
               response_class=service_client.ResponseBody, **kwargs):
        """POST a server action and optionally extract/validate the result.

        response_key: key to pull from the decoded JSON body, or None when
        the action returns no JSON payload of interest.
        """
        post_body = json.dumps({action_name: kwargs})
        resp, body = self.post('servers/%s/action' % str(server_id),
                               post_body)
        if response_key is not None:
            body = json.loads(body)
            # Check for Schema as 'None' because if we do not have any server
            # action schema implemented yet then they can pass 'None' to skip
            # the validation.Once all server action has their schema
            # implemented then, this check can be removed if every actions are
            # supposed to validate their response.
            # TODO(GMann): Remove the below 'if' check once all server actions
            # schema are implemented.
            if schema is not None:
                self.validate_response(schema, resp, body)
            body = body[response_key]
        else:
            self.validate_response(schema, resp, body)
        return response_class(resp, body)

    def create_backup(self, server_id, backup_type, rotation, name):
        """Backup a server instance."""
        return self.action(server_id, "createBackup", None,
                           backup_type=backup_type,
                           rotation=rotation,
                           name=name)

    def change_password(self, server_id, adminPass):
        """Changes the root password for the server."""
        return self.action(server_id, 'changePassword', None,
                           adminPass=adminPass)

    def get_password(self, server_id):
        """Returns the encrypted server password."""
        resp, body = self.get("servers/%s/os-server-password" %
                              str(server_id))
        body = json.loads(body)
        self.validate_response(schema.get_password, resp, body)
        return service_client.ResponseBody(resp, body)

    def delete_password(self, server_id):
        """
        Removes the encrypted server password from the metadata server
        Note that this does not actually change the instance server
        password.
        """
        resp, body = self.delete("servers/%s/os-server-password" %
                                 str(server_id))
        self.validate_response(schema.server_actions_delete_password,
                               resp, body)
        return service_client.ResponseBody(resp, body)

    def reboot(self, server_id, reboot_type):
        """Reboots a server."""
        return self.action(server_id, 'reboot', None, type=reboot_type)

    def rebuild(self, server_id, image_ref, **kwargs):
        """Rebuilds a server with a new image."""
        kwargs['imageRef'] = image_ref
        # Translate the tempest-style kwarg to the API's field name.
        if 'disk_config' in kwargs:
            kwargs['OS-DCF:diskConfig'] = kwargs['disk_config']
            del kwargs['disk_config']
        if self.enable_instance_password:
            rebuild_schema = schema.rebuild_server_with_admin_pass
        else:
            rebuild_schema = schema.rebuild_server
        return self.action(server_id, 'rebuild', 'server',
                           rebuild_schema, **kwargs)

    def resize(self, server_id, flavor_ref, **kwargs):
        """Changes the flavor of a server."""
        kwargs['flavorRef'] = flavor_ref
        if 'disk_config' in kwargs:
            kwargs['OS-DCF:diskConfig'] = kwargs['disk_config']
            del kwargs['disk_config']
        return self.action(server_id, 'resize', None, **kwargs)

    def confirm_resize(self, server_id, **kwargs):
        """Confirms the flavor change for a server."""
        return self.action(server_id, 'confirmResize',
                           None, schema.server_actions_confirm_resize,
                           **kwargs)

    def revert_resize(self, server_id, **kwargs):
        """Reverts a server back to its original flavor."""
        return self.action(server_id, 'revertResize', None, **kwargs)

    def list_server_metadata(self, server_id):
        """Lists all metadata items for a server."""
        resp, body = self.get("servers/%s/metadata" % str(server_id))
        body = json.loads(body)
        self.validate_response(schema.list_server_metadata, resp, body)
        return service_client.ResponseBody(resp, body['metadata'])

    def set_server_metadata(self, server_id, meta, no_metadata_field=False):
        """Replaces the server's metadata with *meta*.

        no_metadata_field: send an empty request body (negative testing).
        """
        if no_metadata_field:
            post_body = ""
        else:
            post_body = json.dumps({'metadata': meta})
        resp, body = self.put('servers/%s/metadata' % str(server_id),
                              post_body)
        body = json.loads(body)
        self.validate_response(schema.set_server_metadata, resp, body)
        return service_client.ResponseBody(resp, body['metadata'])

    def update_server_metadata(self, server_id, meta):
        """Merges *meta* into the server's existing metadata."""
        post_body = json.dumps({'metadata': meta})
        resp, body = self.post('servers/%s/metadata' % str(server_id),
                               post_body)
        body = json.loads(body)
        self.validate_response(schema.update_server_metadata,
                               resp, body)
        return service_client.ResponseBody(resp, body['metadata'])

    def get_server_metadata_item(self, server_id, key):
        """Returns a single metadata item by key."""
        resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
        body = json.loads(body)
        self.validate_response(schema.set_get_server_metadata_item,
                               resp, body)
        return service_client.ResponseBody(resp, body['meta'])

    def set_server_metadata_item(self, server_id, key, meta):
        """Sets a single metadata item by key."""
        post_body = json.dumps({'meta': meta})
        resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
                              post_body)
        body = json.loads(body)
        self.validate_response(schema.set_get_server_metadata_item,
                               resp, body)
        return service_client.ResponseBody(resp, body['meta'])

    def delete_server_metadata_item(self, server_id, key):
        """Deletes a single metadata item by key."""
        resp, body = self.delete("servers/%s/metadata/%s" %
                                 (str(server_id), key))
        self.validate_response(schema.delete_server_metadata_item,
                               resp, body)
        return service_client.ResponseBody(resp, body)

    def stop(self, server_id, **kwargs):
        """Stops the given server."""
        return self.action(server_id, 'os-stop', None, **kwargs)

    def start(self, server_id, **kwargs):
        """Starts the given server."""
        return self.action(server_id, 'os-start', None, **kwargs)

    def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
        """Attaches a volume to a server instance."""
        post_body = json.dumps({
            'volumeAttachment': {
                'volumeId': volume_id,
                'device': device,
            }
        })
        resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema.attach_volume, resp, body)
        return service_client.ResponseBody(resp, body['volumeAttachment'])

    def detach_volume(self, server_id, volume_id):
        """Detaches a volume from a server instance."""
        resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
                                 (server_id, volume_id))
        self.validate_response(schema.detach_volume, resp, body)
        return service_client.ResponseBody(resp, body)

    def get_volume_attachment(self, server_id, attach_id):
        """Return details about the given volume attachment."""
        resp, body = self.get('servers/%s/os-volume_attachments/%s' % (
            str(server_id), attach_id))
        body = json.loads(body)
        self.validate_response(schema.get_volume_attachment, resp, body)
        return service_client.ResponseBody(resp, body['volumeAttachment'])

    def list_volume_attachments(self, server_id):
        """Returns the list of volume attachments for a given instance."""
        resp, body = self.get('servers/%s/os-volume_attachments' % (
            str(server_id)))
        body = json.loads(body)
        self.validate_response(schema.list_volume_attachments, resp, body)
        return service_client.ResponseBodyList(resp, body['volumeAttachments'])

    def add_security_group(self, server_id, name):
        """Adds a security group to the server."""
        return self.action(server_id, 'addSecurityGroup', None, name=name)

    def remove_security_group(self, server_id, name):
        """Removes a security group from the server."""
        return self.action(server_id, 'removeSecurityGroup', None, name=name)

    def live_migrate_server(self, server_id, dest_host, use_block_migration):
        """This should be called with administrator privileges ."""

        migrate_params = {
            "disk_over_commit": False,
            "block_migration": use_block_migration,
            "host": dest_host
        }

        req_body = json.dumps({'os-migrateLive': migrate_params})

        resp, body = self.post("servers/%s/action" % str(server_id), req_body)
        self.validate_response(schema.server_actions_common_schema,
                               resp, body)
        return service_client.ResponseBody(resp, body)

    def migrate_server(self, server_id, **kwargs):
        """Migrates a server to a new host."""
        return self.action(server_id, 'migrate', None, **kwargs)

    def lock_server(self, server_id, **kwargs):
        """Locks the given server."""
        return self.action(server_id, 'lock', None, **kwargs)

    def unlock_server(self, server_id, **kwargs):
        """UNlocks the given server."""
        return self.action(server_id, 'unlock', None, **kwargs)

    def suspend_server(self, server_id, **kwargs):
        """Suspends the provided server."""
        return self.action(server_id, 'suspend', None, **kwargs)

    def resume_server(self, server_id, **kwargs):
        """Un-suspends the provided server."""
        return self.action(server_id, 'resume', None, **kwargs)

    def pause_server(self, server_id, **kwargs):
        """Pauses the provided server."""
        return self.action(server_id, 'pause', None, **kwargs)

    def unpause_server(self, server_id, **kwargs):
        """Un-pauses the provided server."""
        return self.action(server_id, 'unpause', None, **kwargs)

    def reset_state(self, server_id, state='error'):
        """Resets the state of a server to active/error."""
        return self.action(server_id, 'os-resetState', None, state=state)

    def shelve_server(self, server_id, **kwargs):
        """Shelves the provided server."""
        return self.action(server_id, 'shelve', None, **kwargs)

    def unshelve_server(self, server_id, **kwargs):
        """Un-shelves the provided server."""
        return self.action(server_id, 'unshelve', None, **kwargs)

    def shelve_offload_server(self, server_id, **kwargs):
        """Shelve-offload the provided server."""
        return self.action(server_id, 'shelveOffload', None, **kwargs)

    def get_console_output(self, server_id, length):
        """Returns the last *length* lines of console output."""
        kwargs = {'length': length} if length else {}
        return self.action(server_id, 'os-getConsoleOutput', 'output',
                           schema.get_console_output,
                           response_class=service_client.ResponseBodyData,
                           **kwargs)

    def list_virtual_interfaces(self, server_id):
        """
        List the virtual interfaces used in an instance.
        """
        resp, body = self.get('/'.join(['servers', server_id,
                              'os-virtual-interfaces']))
        body = json.loads(body)
        self.validate_response(schema.list_virtual_interfaces, resp, body)
        return service_client.ResponseBody(resp, body)

    def rescue_server(self, server_id, **kwargs):
        """Rescue the provided server."""
        return self.action(server_id, 'rescue', 'adminPass',
                           schema.rescue_server,
                           response_class=service_client.ResponseBodyData,
                           **kwargs)

    def unrescue_server(self, server_id):
        """Unrescue the provided server."""
        return self.action(server_id, 'unrescue', None)

    def get_server_diagnostics(self, server_id):
        """Get the usage data for a server."""
        resp, body = self.get("servers/%s/diagnostics" % str(server_id))
        return service_client.ResponseBody(resp, json.loads(body))

    def list_instance_actions(self, server_id):
        """List the provided server action."""
        resp, body = self.get("servers/%s/os-instance-actions" %
                              str(server_id))
        body = json.loads(body)
        self.validate_response(schema.list_instance_actions, resp, body)
        return service_client.ResponseBodyList(resp, body['instanceActions'])

    def get_instance_action(self, server_id, request_id):
        """Returns the action details of the provided server."""
        resp, body = self.get("servers/%s/os-instance-actions/%s" %
                              (str(server_id), str(request_id)))
        body = json.loads(body)
        self.validate_response(schema.get_instance_action, resp, body)
        return service_client.ResponseBody(resp, body['instanceAction'])

    def force_delete_server(self, server_id, **kwargs):
        """Force delete a server."""
        return self.action(server_id, 'forceDelete', None, **kwargs)

    def restore_soft_deleted_server(self, server_id, **kwargs):
        """Restore a soft-deleted server."""
        return self.action(server_id, 'restore', None, **kwargs)

    def reset_network(self, server_id, **kwargs):
        """Resets the Network of a server"""
        return self.action(server_id, 'resetNetwork', None, **kwargs)

    def inject_network_info(self, server_id, **kwargs):
        """Inject the Network Info into server"""
        return self.action(server_id, 'injectNetworkInfo', None, **kwargs)

    def get_vnc_console(self, server_id, console_type):
        """Get URL of VNC console."""
        return self.action(server_id, "os-getVNCConsole",
                           "console", schema.get_vnc_console,
                           type=console_type)

    def create_server_group(self, name, policies):
        """
        Create the server group
        name : Name of the server-group
        policies : List of the policies - affinity/anti-affinity)
        """
        post_body = {
            'name': name,
            'policies': policies,
        }

        post_body = json.dumps({'server_group': post_body})
        resp, body = self.post('os-server-groups', post_body)

        body = json.loads(body)
        self.validate_response(schema.create_get_server_group, resp, body)
        return service_client.ResponseBody(resp, body['server_group'])

    def delete_server_group(self, server_group_id):
        """Delete the given server-group."""
        resp, body = self.delete("os-server-groups/%s" % str(server_group_id))
        self.validate_response(schema.delete_server_group, resp, body)
        return service_client.ResponseBody(resp, body)

    def list_server_groups(self):
        """List the server-groups."""
        resp, body = self.get("os-server-groups")
        body = json.loads(body)
        self.validate_response(schema.list_server_groups, resp, body)
        return service_client.ResponseBodyList(resp, body['server_groups'])

    def get_server_group(self, server_group_id):
        """Get the details of given server_group."""
        resp, body = self.get("os-server-groups/%s" % str(server_group_id))
        body = json.loads(body)
        # Create and GET share the same response schema.
        self.validate_response(schema.create_get_server_group, resp, body)
        return service_client.ResponseBody(resp, body['server_group'])
class TenantNetworksClientJSON(service_client.ServiceClient):
    """JSON client for the Nova os-tenant-networks API."""

    def list_tenant_networks(self):
        """Return every network visible to the current tenant."""
        resp, raw = self.get("os-tenant-networks")
        parsed = json.loads(raw)
        self.validate_response(schema.list_tenant_networks, resp, parsed)
        return service_client.ResponseBodyList(resp, parsed['networks'])

    def get_tenant_network(self, network_id):
        """Return the details of a single tenant network."""
        resp, raw = self.get("os-tenant-networks/%s" % str(network_id))
        parsed = json.loads(raw)
        self.validate_response(schema.get_tenant_network, resp, parsed)
        return service_client.ResponseBody(resp, parsed['network'])
# NOTE: this module previously did 'import urllib' and called
# urllib.urlencode(), which only works on Python 2 (urlencode moved to
# urllib.parse in Python 3).  Use the six compatibility alias, matching
# the other service clients in this tree.
from six.moves.urllib import parse as urllib

from neutron_lbaas.tests.tempest.lib.common import service_client


class DatabaseFlavorsClientJSON(service_client.ServiceClient):
    """JSON client for the Trove (database) flavors API."""

    def list_db_flavors(self, params=None):
        """List database flavors, optionally filtered by *params*.

        params: dict of query parameters to append to the URL.
        """
        url = 'flavors'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def get_db_flavor_details(self, db_flavor_id):
        """Return the details of a single database flavor."""
        resp, body = self.get("flavors/%s" % str(db_flavor_id))
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))
class DatabaseLimitsClientJSON(service_client.ServiceClient):
    """JSON client for the Trove (database) limits API."""

    def list_db_limits(self, params=None):
        """List all limits.

        params: dict of query parameters to append to the URL.
        """
        url = 'limits'
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, raw = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(raw))
+ +from six.moves.urllib import parse as urllib + +from neutron_lbaas.tests.tempest.lib.common import service_client + + +class DatabaseVersionsClientJSON(service_client.ServiceClient): + + def __init__(self, auth_provider, service, region, + endpoint_type=None, build_interval=None, build_timeout=None, + disable_ssl_certificate_validation=None, ca_certs=None, + trace_requests=None): + dscv = disable_ssl_certificate_validation + super(DatabaseVersionsClientJSON, self).__init__( + auth_provider, service, region, + endpoint_type=endpoint_type, + build_interval=build_interval, + build_timeout=build_timeout, + disable_ssl_certificate_validation=dscv, + ca_certs=ca_certs, + trace_requests=trace_requests) + self.skip_path() + + def list_db_versions(self, params=None): + """List all versions.""" + url = '' + if params: + url += '?%s' % urllib.urlencode(params) + + resp, body = self.get(url) + self.expected_success(200, resp.status) + return service_client.ResponseBodyList(resp, self._parse_resp(body)) diff --git a/neutron_lbaas/tests/tempest/lib/services/image/__init__.py b/neutron_lbaas/tests/tempest/lib/services/image/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/image/__init__.py @@ -0,0 +1 @@ + diff --git a/neutron_lbaas/tests/tempest/lib/services/image/v1/__init__.py b/neutron_lbaas/tests/tempest/lib/services/image/v1/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/image/v1/__init__.py @@ -0,0 +1 @@ + diff --git a/neutron_lbaas/tests/tempest/lib/services/image/v1/json/__init__.py b/neutron_lbaas/tests/tempest/lib/services/image/v1/json/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/image/v1/json/__init__.py @@ -0,0 +1 @@ + diff --git a/neutron_lbaas/tests/tempest/lib/services/image/v1/json/image_client.py 
import copy
import errno
import json
import os
import time

from oslo_log import log as logging
import six
from six.moves.urllib import parse as urllib
from tempest_lib.common.utils import misc as misc_utils
from tempest_lib import exceptions as lib_exc

from neutron_lbaas.tests.tempest.lib.common import glance_http
from neutron_lbaas.tests.tempest.lib.common import service_client
from neutron_lbaas.tests.tempest.lib import exceptions

LOG = logging.getLogger(__name__)


class ImageClientJSON(service_client.ServiceClient):
    """JSON client for the Glance v1 image API."""

    def __init__(self, auth_provider, catalog_type, region, endpoint_type=None,
                 build_interval=None, build_timeout=None,
                 disable_ssl_certificate_validation=None,
                 ca_certs=None, trace_requests=None):
        super(ImageClientJSON, self).__init__(
            auth_provider,
            catalog_type,
            region,
            endpoint_type=endpoint_type,
            build_interval=build_interval,
            build_timeout=build_timeout,
            disable_ssl_certificate_validation=(
                disable_ssl_certificate_validation),
            ca_certs=ca_certs,
            trace_requests=trace_requests)
        # The raw glance_http client is built lazily (see the ``http``
        # property) because it is only needed for chunked data uploads.
        self._http = None
        self.dscv = disable_ssl_certificate_validation
        self.ca_certs = ca_certs

    def _image_meta_from_headers(self, headers):
        """Rebuild an image-metadata dict from ``x-image-meta-*`` headers."""
        meta = {'properties': {}}
        for key, value in six.iteritems(headers):
            if key.startswith('x-image-meta-property-'):
                _key = key[22:]
                meta['properties'][_key] = value
            elif key.startswith('x-image-meta-'):
                _key = key[13:]
                meta[_key] = value

        # Glance serializes booleans/ints as header strings; coerce the
        # well-known fields back to their native types.
        for key in ['is_public', 'protected', 'deleted']:
            if key in meta:
                meta[key] = meta[key].strip().lower() in ('t', 'true', 'yes',
                                                          '1')
        for key in ['size', 'min_ram', 'min_disk']:
            if key in meta:
                try:
                    meta[key] = int(meta[key])
                except ValueError:
                    pass
        return meta

    def _image_meta_to_headers(self, fields):
        """Serialize an image-metadata dict into ``x-image-meta-*`` headers."""
        headers = {}
        fields_copy = copy.deepcopy(fields)
        copy_from = fields_copy.pop('copy_from', None)
        if copy_from is not None:
            headers['x-glance-api-copy-from'] = copy_from
        for key, value in six.iteritems(fields_copy.pop('properties', {})):
            headers['x-image-meta-property-%s' % key] = str(value)
        for key, value in six.iteritems(fields_copy.pop('api', {})):
            headers['x-glance-api-property-%s' % key] = str(value)
        for key, value in six.iteritems(fields_copy):
            headers['x-image-meta-%s' % key] = str(value)
        return headers

    def _get_file_size(self, obj):
        """Analyze file-like object and attempt to determine its size.

        :param obj: file-like object, typically redirected from stdin.
        :retval The file's size or None if it cannot be determined.
        """
        # For large images, we need to supply the size of the
        # image file. See LP Bugs #827660 and #845788.
        if hasattr(obj, 'seek') and hasattr(obj, 'tell'):
            try:
                obj.seek(0, os.SEEK_END)
                obj_size = obj.tell()
                obj.seek(0)
                return obj_size
            except IOError as e:
                if e.errno == errno.ESPIPE:
                    # Illegal seek. This means the user is trying
                    # to pipe image data to the client, e.g.
                    # echo testdata | bin/glance add blah..., or
                    # that stdin is empty, or that a file-like
                    # object which doesn't support 'seek/tell' has
                    # been supplied.
                    return None
                else:
                    raise
        else:
            # Cannot determine size of input image
            return None

    def _get_http(self):
        return glance_http.HTTPClient(auth_provider=self.auth_provider,
                                      filters=self.filters,
                                      insecure=self.dscv,
                                      ca_certs=self.ca_certs)

    def _create_with_data(self, headers, data):
        """POST a new image together with its raw data payload."""
        resp, body_iter = self.http.raw_request('POST', '/v1/images',
                                                headers=headers, body=data)
        self._error_checker('POST', '/v1/images', headers, data, resp,
                            body_iter)
        body = json.loads(''.join([c for c in body_iter]))
        return service_client.ResponseBody(resp, body['image'])

    def _update_with_data(self, image_id, headers, data):
        """PUT updated raw data (and headers) for an existing image."""
        url = '/v1/images/%s' % image_id
        resp, body_iter = self.http.raw_request('PUT', url, headers=headers,
                                                body=data)
        self._error_checker('PUT', url, headers, data,
                            resp, body_iter)
        body = json.loads(''.join([c for c in body_iter]))
        return service_client.ResponseBody(resp, body['image'])

    @property
    def http(self):
        # Lazily-created raw HTTP client used for data uploads.
        if self._http is None:
            self._http = self._get_http()
        return self._http

    def create_image(self, name, container_format, disk_format, **kwargs):
        """Create an image; uploads data inline when ``data`` is given."""
        params = {
            "name": name,
            "container_format": container_format,
            "disk_format": disk_format,
        }

        headers = {}

        for option in ['is_public', 'location', 'properties',
                       'copy_from', 'min_ram']:
            if option in kwargs:
                params[option] = kwargs.get(option)

        headers.update(self._image_meta_to_headers(params))

        if 'data' in kwargs:
            return self._create_with_data(headers, kwargs.get('data'))

        resp, body = self.post('v1/images', None, headers)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['image'])

    def update_image(self, image_id, name=None, container_format=None,
                     data=None, properties=None):
        """Update image metadata and/or data for ``image_id``."""
        params = {}
        headers = {}
        if name is not None:
            params['name'] = name

        if container_format is not None:
            params['container_format'] = container_format

        if properties is not None:
            params['properties'] = properties

        headers.update(self._image_meta_to_headers(params))

        if data is not None:
            return self._update_with_data(image_id, headers, data)

        url = 'v1/images/%s' % image_id
        resp, body = self.put(url, data, headers)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['image'])

    def delete_image(self, image_id):
        """Delete the given image."""
        url = 'v1/images/%s' % image_id
        resp, body = self.delete(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, body)

    def list_images(self, detail=False, properties=None,
                    changes_since=None, **kwargs):
        """List images, optionally in detail and filtered by properties.

        Bug fix: ``properties`` previously defaulted to a shared mutable
        ``dict()``; it now defaults to None (same behavior for callers).
        """
        url = 'v1/images'

        if detail:
            url += '/detail'

        params = {}
        for key, value in (properties or {}).items():
            params['property-%s' % key] = value

        kwargs.update(params)

        if changes_since is not None:
            kwargs['changes-since'] = changes_since

        if len(kwargs) > 0:
            url += '?%s' % urllib.urlencode(kwargs)

        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp, body['images'])

    def get_image_meta(self, image_id):
        """HEAD the image and decode its metadata from response headers."""
        url = 'v1/images/%s' % image_id
        resp, __ = self.head(url)
        self.expected_success(200, resp.status)
        body = self._image_meta_from_headers(resp)
        return service_client.ResponseBody(resp, body)

    def show_image(self, image_id):
        """Return the raw image data for ``image_id``."""
        url = 'v1/images/%s' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyData(resp, body)

    def is_resource_deleted(self, id):
        """Return True once the image can no longer be found."""
        try:
            self.get_image_meta(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'image_meta'

    def list_image_members(self, image_id):
        """List the members of an image."""
        url = 'v1/images/%s/members' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def list_shared_images(self, tenant_id):
        """List shared images with the specified tenant"""
        url = 'v1/shared-images/%s' % tenant_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def add_member(self, member_id, image_id, can_share=False):
        """Add ``member_id`` as a member of ``image_id``."""
        url = 'v1/images/%s/members/%s' % (image_id, member_id)
        body = None
        if can_share:
            body = json.dumps({'member': {'can_share': True}})
        resp, __ = self.put(url, body)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    def delete_member(self, member_id, image_id):
        """Remove ``member_id`` from the members of ``image_id``."""
        url = 'v1/images/%s/members/%s' % (image_id, member_id)
        resp, __ = self.delete(url)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    # NOTE(afazekas): just for the wait function
    def _get_image_status(self, image_id):
        meta = self.get_image_meta(image_id)
        status = meta['status']
        return status

    # NOTE(afazkas): Wait reinvented again. It is not in the correct layer
    def wait_for_image_status(self, image_id, status):
        """Waits for a Image to reach a given status.

        Bug fixes relative to the original: the loop slept twice per
        iteration (making the effective poll interval 2 * build_interval and
        delaying even an immediately-satisfied wait), and the implicitly
        concatenated log/timeout messages were missing separator spaces.
        """
        start_time = time.time()
        old_value = value = self._get_image_status(image_id)
        while True:
            dtime = time.time() - start_time
            if value != old_value:
                LOG.info('Value transition from "%s" to "%s" '
                         'in %d second(s).', old_value,
                         value, dtime)
            if value == status:
                return value

            if value == 'killed':
                raise exceptions.ImageKilledException(image_id=image_id,
                                                      status=status)
            if dtime > self.build_timeout:
                message = ('Time Limit Exceeded! (%ds) '
                           'while waiting for %s, '
                           'but we got %s.' %
                           (self.build_timeout, status, value))
                caller = misc_utils.find_test_caller()
                if caller:
                    message = '(%s) %s' % (caller, message)
                raise exceptions.TimeoutException(message)
            time.sleep(self.build_interval)
            old_value = value
            value = self._get_image_status(image_id)
import json

import jsonschema
from six.moves.urllib import parse as urllib
from tempest_lib import exceptions as lib_exc

from neutron_lbaas.tests.tempest.lib.common import glance_http
from neutron_lbaas.tests.tempest.lib.common import service_client


class ImageClientV2JSON(service_client.ServiceClient):
    """JSON client for the Glance v2 image API."""

    def __init__(self, auth_provider, catalog_type, region, endpoint_type=None,
                 build_interval=None, build_timeout=None,
                 disable_ssl_certificate_validation=None, ca_certs=None,
                 trace_requests=None):
        super(ImageClientV2JSON, self).__init__(
            auth_provider,
            catalog_type,
            region,
            endpoint_type=endpoint_type,
            build_interval=build_interval,
            build_timeout=build_timeout,
            disable_ssl_certificate_validation=(
                disable_ssl_certificate_validation),
            ca_certs=ca_certs,
            trace_requests=trace_requests)
        # The raw glance_http client is built lazily (see ``http``) and is
        # only used for binary data uploads.
        self._http = None
        self.dscv = disable_ssl_certificate_validation
        self.ca_certs = ca_certs

    def _get_http(self):
        return glance_http.HTTPClient(auth_provider=self.auth_provider,
                                      filters=self.filters,
                                      insecure=self.dscv,
                                      ca_certs=self.ca_certs)

    def _validate_schema(self, body, type='image'):
        """Validate ``body`` against the server-published schema.

        :param type: schema name; only 'image' and 'images' are supported.
                     (Parameter name shadows the builtin but is kept for
                     caller compatibility.)
        """
        if type in ['image', 'images']:
            schema = self.get_schema(type)
        else:
            raise ValueError("%s is not a valid schema type" % type)

        jsonschema.validate(body, schema)

    @property
    def http(self):
        if self._http is None:
            self._http = self._get_http()
        return self._http

    def update_image(self, image_id, patch):
        """Apply a JSON-patch document to an image."""
        # Bug fix: the original validated the already-serialized JSON
        # *string*, so jsonschema checked a str instance instead of the
        # request structure. Validate the Python object, then serialize.
        # NOTE(review): ``patch`` is a JSON-patch list; whether the 'image'
        # schema describes it is unconfirmed -- TODO verify against the
        # server's published schemas.
        self._validate_schema(patch)
        data = json.dumps(patch)

        headers = {"Content-Type": "application/openstack-images-v2.0"
                                   "-json-patch"}
        resp, body = self.patch('v2/images/%s' % image_id, data, headers)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def create_image(self, name, container_format, disk_format, **kwargs):
        """Create an image record (no data upload; see ``store_image``)."""
        params = {
            "name": name,
            "container_format": container_format,
            "disk_format": disk_format,
        }

        for option in kwargs:
            value = kwargs.get(option)
            if isinstance(value, dict) or isinstance(value, tuple):
                params.update(value)
            else:
                params[option] = value

        # Bug fix: validate the request dict itself rather than its JSON
        # string serialization (see update_image).
        self._validate_schema(params)
        data = json.dumps(params)

        resp, body = self.post('v2/images', data)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def deactivate_image(self, image_id):
        url = 'v2/images/%s/actions/deactivate' % image_id
        resp, body = self.post(url, None)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def reactivate_image(self, image_id):
        url = 'v2/images/%s/actions/reactivate' % image_id
        resp, body = self.post(url, None)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def delete_image(self, image_id):
        url = 'v2/images/%s' % image_id
        resp, _ = self.delete(url)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    def list_images(self, params=None):
        """List images, optionally filtered by query ``params``."""
        url = 'v2/images'

        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        self._validate_schema(body, type='images')
        return service_client.ResponseBodyList(resp, body['images'])

    def show_image(self, image_id):
        url = 'v2/images/%s' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def is_resource_deleted(self, id):
        """Return True once the image can no longer be found."""
        try:
            self.show_image(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'image'

    def store_image(self, image_id, data):
        """Upload raw image data for an existing image record."""
        url = 'v2/images/%s/file' % image_id
        headers = {'Content-Type': 'application/octet-stream'}
        resp, body = self.http.raw_request('PUT', url, headers=headers,
                                           body=data)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def get_image_file(self, image_id):
        """Download the raw image data."""
        url = 'v2/images/%s/file' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyData(resp, body)

    def add_image_tag(self, image_id, tag):
        url = 'v2/images/%s/tags/%s' % (image_id, tag)
        resp, body = self.put(url, body=None)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def delete_image_tag(self, image_id, tag):
        url = 'v2/images/%s/tags/%s' % (image_id, tag)
        resp, _ = self.delete(url)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    def list_image_members(self, image_id):
        url = 'v2/images/%s/members' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def add_image_member(self, image_id, member_id):
        url = 'v2/images/%s/members' % image_id
        data = json.dumps({'member': member_id})
        resp, body = self.post(url, data)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def update_image_member(self, image_id, member_id, body):
        url = 'v2/images/%s/members/%s' % (image_id, member_id)
        data = json.dumps(body)
        resp, body = self.put(url, data)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def show_image_member(self, image_id, member_id):
        url = 'v2/images/%s/members/%s' % (image_id, member_id)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(body))

    def remove_image_member(self, image_id, member_id):
        url = 'v2/images/%s/members/%s' % (image_id, member_id)
        resp, _ = self.delete(url)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    def get_schema(self, schema):
        """Fetch a named schema document from the server."""
        url = 'v2/schemas/%s' % schema
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
import json
import re
import time

from six.moves.urllib import parse as urllib
from tempest_lib import exceptions as lib_exc

from neutron_lbaas.tests.tempest.lib.common import service_client
from neutron_lbaas.tests.tempest.lib import exceptions


class OrchestrationClient(service_client.ServiceClient):
    """JSON client for the Heat orchestration API."""

    def list_stacks(self, params=None):
        """Lists all stacks for a user."""

        uri = 'stacks'
        if params:
            uri += '?%s' % urllib.urlencode(params)

        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp, body['stacks'])

    def create_stack(self, name, disable_rollback=True, parameters=None,
                     timeout_mins=60, template=None, template_url=None,
                     environment=None, files=None):
        """Create a stack from a template or template URL."""
        if parameters is None:
            parameters = {}
        headers, body = self._prepare_update_create(
            name,
            disable_rollback,
            parameters,
            timeout_mins,
            template,
            template_url,
            environment,
            files)
        uri = 'stacks'
        resp, body = self.post(uri, headers=headers, body=body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def update_stack(self, stack_identifier, name, disable_rollback=True,
                     parameters=None, timeout_mins=60, template=None,
                     template_url=None, environment=None, files=None):
        """Update an existing stack."""
        if parameters is None:
            parameters = {}
        # Bug fix: the original call dropped the ``files`` argument, so
        # per-file template contents were silently lost on update.
        headers, body = self._prepare_update_create(
            name,
            disable_rollback,
            parameters,
            timeout_mins,
            template,
            template_url,
            environment,
            files)

        uri = "stacks/%s" % stack_identifier
        resp, body = self.put(uri, headers=headers, body=body)
        self.expected_success(202, resp.status)
        return service_client.ResponseBody(resp, body)

    def _prepare_update_create(self, name, disable_rollback=True,
                               parameters=None, timeout_mins=60,
                               template=None, template_url=None,
                               environment=None, files=None):
        """Build the headers and JSON body shared by create/update."""
        if parameters is None:
            parameters = {}
        post_body = {
            "stack_name": name,
            "disable_rollback": disable_rollback,
            "parameters": parameters,
            "timeout_mins": timeout_mins,
            # Minimal default template, overridden below when one is given.
            "template": "HeatTemplateFormatVersion: '2012-12-12'\n",
            "environment": environment,
            "files": files
        }
        if template:
            post_body['template'] = template
        if template_url:
            post_body['template_url'] = template_url
        body = json.dumps(post_body)

        # Password must be provided on stack create so that heat
        # can perform future operations on behalf of the user
        headers = self.get_headers()
        headers['X-Auth-Key'] = self.password
        headers['X-Auth-User'] = self.user
        return headers, body

    def show_stack(self, stack_identifier):
        """Returns the details of a single stack."""
        url = "stacks/%s" % stack_identifier
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['stack'])

    def suspend_stack(self, stack_identifier):
        """Suspend a stack."""
        url = 'stacks/%s/actions' % stack_identifier
        body = {'suspend': None}
        resp, body = self.post(url, json.dumps(body))
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp)

    def resume_stack(self, stack_identifier):
        """Resume a stack."""
        url = 'stacks/%s/actions' % stack_identifier
        body = {'resume': None}
        resp, body = self.post(url, json.dumps(body))
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp)

    def list_resources(self, stack_identifier):
        """Returns the list of resources for a stack."""
        url = "stacks/%s/resources" % stack_identifier
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp, body['resources'])

    def show_resource(self, stack_identifier, resource_name):
        """Returns the details of a single resource."""
        url = "stacks/%s/resources/%s" % (stack_identifier, resource_name)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['resource'])

    def delete_stack(self, stack_identifier):
        """Deletes the specified Stack."""
        resp, _ = self.delete("stacks/%s" % str(stack_identifier))
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    def wait_for_resource_status(self, stack_identifier, resource_name,
                                 status, failure_pattern='^.*_FAILED$'):
        """Waits for a Resource to reach a given status."""
        start = int(time.time())
        fail_regexp = re.compile(failure_pattern)
        # Bug fix: initialized so the timeout message below is well-formed
        # even when the resource was never observed (previously a NameError).
        resource_status = None

        while True:
            try:
                body = self.show_resource(
                    stack_identifier, resource_name)
            except lib_exc.NotFound:
                # ignore this, as the resource may not have
                # been created yet
                pass
            else:
                resource_name = body['resource_name']
                resource_status = body['resource_status']
                if resource_status == status:
                    return
                if fail_regexp.search(resource_status):
                    raise exceptions.StackResourceBuildErrorException(
                        resource_name=resource_name,
                        stack_identifier=stack_identifier,
                        resource_status=resource_status,
                        resource_status_reason=body['resource_status_reason'])

            if int(time.time()) - start >= self.build_timeout:
                message = ('Resource %s failed to reach %s status '
                           '(current %s) within the required time (%s s).' %
                           (resource_name,
                            status,
                            resource_status,
                            self.build_timeout))
                raise exceptions.TimeoutException(message)
            time.sleep(self.build_interval)

    def wait_for_stack_status(self, stack_identifier, status,
                              failure_pattern='^.*_FAILED$'):
        """Waits for a Stack to reach a given status."""
        start = int(time.time())
        fail_regexp = re.compile(failure_pattern)

        while True:
            try:
                body = self.show_stack(stack_identifier)
            except lib_exc.NotFound:
                if status == 'DELETE_COMPLETE':
                    return
                # Bug fix: the original fell through and read the unbound
                # ``body`` variable (raising NameError); propagate the real
                # NotFound when not waiting for deletion.
                raise
            stack_name = body['stack_name']
            stack_status = body['stack_status']
            if stack_status == status:
                return body
            if fail_regexp.search(stack_status):
                raise exceptions.StackBuildErrorException(
                    stack_identifier=stack_identifier,
                    stack_status=stack_status,
                    stack_status_reason=body['stack_status_reason'])

            if int(time.time()) - start >= self.build_timeout:
                message = ('Stack %s failed to reach %s status (current: %s) '
                           'within the required time (%s s).' %
                           (stack_name, status, stack_status,
                            self.build_timeout))
                raise exceptions.TimeoutException(message)
            time.sleep(self.build_interval)

    def show_resource_metadata(self, stack_identifier, resource_name):
        """Returns the resource's metadata."""
        url = ('stacks/{stack_identifier}/resources/{resource_name}'
               '/metadata'.format(**locals()))
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['metadata'])

    def list_events(self, stack_identifier):
        """Returns list of all events for a stack."""
        url = 'stacks/{stack_identifier}/events'.format(**locals())
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp, body['events'])

    def list_resource_events(self, stack_identifier, resource_name):
        """Returns list of all events for a resource from stack."""
        url = ('stacks/{stack_identifier}/resources/{resource_name}'
               '/events'.format(**locals()))
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp, body['events'])

    def show_event(self, stack_identifier, resource_name, event_id):
        """Returns the details of a single stack's event."""
        url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
               '/{event_id}'.format(**locals()))
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['event'])

    def show_template(self, stack_identifier):
        """Returns the template for the stack."""
        url = ('stacks/{stack_identifier}/template'.format(**locals()))
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def _validate_template(self, post_body):
        """Returns the validation request result."""
        post_body = json.dumps(post_body)
        resp, body = self.post('validate', post_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def validate_template(self, template, parameters=None):
        """Returns the validation result for a template with parameters."""
        if parameters is None:
            parameters = {}
        post_body = {
            'template': template,
            'parameters': parameters,
        }
        return self._validate_template(post_body)

    def validate_template_url(self, template_url, parameters=None):
        """Returns the validation result for a template with parameters."""
        if parameters is None:
            parameters = {}
        post_body = {
            'template_url': template_url,
            'parameters': parameters,
        }
        return self._validate_template(post_body)

    def list_resource_types(self):
        """List resource types."""
        resp, body = self.get('resource_types')
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp, body['resource_types'])

    def show_resource_type(self, resource_type_name):
        """Return the schema of a resource type."""
        url = 'resource_types/%s' % resource_type_name
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(body))

    def show_resource_type_template(self, resource_type_name):
        """Return the template of a resource type."""
        url = 'resource_types/%s/template' % resource_type_name
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(body))

    def create_software_config(self, name=None, config=None, group=None,
                               inputs=None, outputs=None, options=None):
        """Create a software configuration."""
        headers, body = self._prep_software_config_create(
            name, config, group, inputs, outputs, options)

        url = 'software_configs'
        resp, body = self.post(url, headers=headers, body=body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def show_software_config(self, conf_id):
        """Returns a software configuration resource."""
        url = 'software_configs/%s' % str(conf_id)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def delete_software_config(self, conf_id):
        """Deletes a specific software configuration."""
        url = 'software_configs/%s' % str(conf_id)
        resp, _ = self.delete(url)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    def create_software_deploy(self, server_id=None, config_id=None,
                               action=None, status=None,
                               input_values=None, output_values=None,
                               status_reason=None, signal_transport=None):
        """Creates or updates a software deployment."""
        headers, body = self._prep_software_deploy_update(
            None, server_id, config_id, action, status, input_values,
            output_values, status_reason, signal_transport)

        url = 'software_deployments'
        resp, body = self.post(url, headers=headers, body=body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def update_software_deploy(self, deploy_id=None, server_id=None,
                               config_id=None, action=None, status=None,
                               input_values=None, output_values=None,
                               status_reason=None, signal_transport=None):
        """Creates or updates a software deployment."""
        headers, body = self._prep_software_deploy_update(
            deploy_id, server_id, config_id, action, status, input_values,
            output_values, status_reason, signal_transport)

        url = 'software_deployments/%s' % str(deploy_id)
        resp, body = self.put(url, headers=headers, body=body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def list_software_deployments(self):
        """Returns a list of all deployments."""
        url = 'software_deployments'
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def show_software_deployment(self, deploy_id):
        """Returns a specific software deployment."""
        url = 'software_deployments/%s' % str(deploy_id)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def show_software_deployment_metadata(self, server_id):
        """Return a config metadata for a specific server."""
        url = 'software_deployments/metadata/%s' % server_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def delete_software_deploy(self, deploy_id):
        """Deletes a specific software deployment."""
        url = 'software_deployments/%s' % str(deploy_id)
        resp, _ = self.delete(url)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    def _prep_software_config_create(self, name=None, conf=None, group=None,
                                     inputs=None, outputs=None, options=None):
        """Prepares a software configuration body."""
        post_body = {}
        if name is not None:
            post_body["name"] = name
        if conf is not None:
            post_body["config"] = conf
        if group is not None:
            post_body["group"] = group
        if inputs is not None:
            post_body["inputs"] = inputs
        if outputs is not None:
            post_body["outputs"] = outputs
        if options is not None:
            post_body["options"] = options
        body = json.dumps(post_body)

        headers = self.get_headers()
        return headers, body

    def _prep_software_deploy_update(self, deploy_id=None, server_id=None,
                                     config_id=None, action=None, status=None,
                                     input_values=None, output_values=None,
                                     status_reason=None,
                                     signal_transport=None):
        """Prepares a deployment create or update (if an id was given)."""
        post_body = {}

        if deploy_id is not None:
            post_body["id"] = deploy_id
        if server_id is not None:
            post_body["server_id"] = server_id
        if config_id is not None:
            post_body["config_id"] = config_id
        if action is not None:
            post_body["action"] = action
        if status is not None:
            post_body["status"] = status
        if input_values is not None:
            post_body["input_values"] = input_values
        if output_values is not None:
            post_body["output_values"] = output_values
        if status_reason is not None:
            post_body["status_reason"] = status_reason
        if signal_transport is not None:
            post_body["signal_transport"] = signal_transport
        body = json.dumps(post_body)

        headers = self.get_headers()
        return headers, body
a/neutron_lbaas/tests/tempest/lib/services/volume/json/snapshots_client.py b/neutron_lbaas/tests/tempest/lib/services/volume/json/snapshots_client.py new file mode 100644 index 000000000..978e4f3f0 --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/volume/json/snapshots_client.py @@ -0,0 +1,201 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import time + +from oslo_log import log as logging +from six.moves.urllib import parse as urllib +from tempest_lib import exceptions as lib_exc + +from neutron_lbaas.tests.tempest.lib.common import service_client +from neutron_lbaas.tests.tempest.lib import exceptions + +LOG = logging.getLogger(__name__) + + +class BaseSnapshotsClientJSON(service_client.ServiceClient): + """Base Client class to send CRUD Volume API requests.""" + + create_resp = 200 + + def list_snapshots(self, detail=False, params=None): + """List all the snapshot.""" + url = 'snapshots' + if detail: + url += '/detail' + if params: + url += '?%s' % urllib.urlencode(params) + + resp, body = self.get(url) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBodyList(resp, body['snapshots']) + + def show_snapshot(self, snapshot_id): + """Returns the details of a single snapshot.""" + url = "snapshots/%s" % str(snapshot_id) + resp, body = self.get(url) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['snapshot']) + + def 
create_snapshot(self, volume_id, **kwargs): + """ + Creates a new snapshot. + volume_id(Required): id of the volume. + force: Create a snapshot even if the volume attached (Default=False) + display_name: Optional snapshot Name. + display_description: User friendly snapshot description. + """ + post_body = {'volume_id': volume_id} + post_body.update(kwargs) + post_body = json.dumps({'snapshot': post_body}) + resp, body = self.post('snapshots', post_body) + body = json.loads(body) + self.expected_success(self.create_resp, resp.status) + return service_client.ResponseBody(resp, body['snapshot']) + + def update_snapshot(self, snapshot_id, **kwargs): + """Updates a snapshot.""" + put_body = json.dumps({'snapshot': kwargs}) + resp, body = self.put('snapshots/%s' % snapshot_id, put_body) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['snapshot']) + + # NOTE(afazekas): just for the wait function + def _get_snapshot_status(self, snapshot_id): + body = self.show_snapshot(snapshot_id) + status = body['status'] + # NOTE(afazekas): snapshot can reach an "error" + # state in a "normal" lifecycle + if (status == 'error'): + raise exceptions.SnapshotBuildErrorException( + snapshot_id=snapshot_id) + + return status + + # NOTE(afazkas): Wait reinvented again. It is not in the correct layer + def wait_for_snapshot_status(self, snapshot_id, status): + """Waits for a Snapshot to reach a given status.""" + start_time = time.time() + old_value = value = self._get_snapshot_status(snapshot_id) + while True: + dtime = time.time() - start_time + time.sleep(self.build_interval) + if value != old_value: + LOG.info('Value transition from "%s" to "%s"' + 'in %d second(s).', old_value, + value, dtime) + if (value == status): + return value + + if dtime > self.build_timeout: + message = ('Time Limit Exceeded! (%ds)' + 'while waiting for %s, ' + 'but we got %s.' 
% + (self.build_timeout, status, value)) + raise exceptions.TimeoutException(message) + time.sleep(self.build_interval) + old_value = value + value = self._get_snapshot_status(snapshot_id) + + def delete_snapshot(self, snapshot_id): + """Delete Snapshot.""" + resp, body = self.delete("snapshots/%s" % str(snapshot_id)) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def is_resource_deleted(self, id): + try: + self.show_snapshot(id) + except lib_exc.NotFound: + return True + return False + + @property + def resource_type(self): + """Returns the primary type of resource this client works with.""" + return 'volume-snapshot' + + def reset_snapshot_status(self, snapshot_id, status): + """Reset the specified snapshot's status.""" + post_body = json.dumps({'os-reset_status': {"status": status}}) + resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def update_snapshot_status(self, snapshot_id, status, progress): + """Update the specified snapshot's status.""" + post_body = { + 'status': status, + 'progress': progress + } + post_body = json.dumps({'os-update_snapshot_status': post_body}) + url = 'snapshots/%s/action' % str(snapshot_id) + resp, body = self.post(url, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def create_snapshot_metadata(self, snapshot_id, metadata): + """Create metadata for the snapshot.""" + put_body = json.dumps({'metadata': metadata}) + url = "snapshots/%s/metadata" % str(snapshot_id) + resp, body = self.post(url, put_body) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['metadata']) + + def show_snapshot_metadata(self, snapshot_id): + """Get metadata of the snapshot.""" + url = "snapshots/%s/metadata" % str(snapshot_id) + resp, body = self.get(url) + body = 
json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['metadata']) + + def update_snapshot_metadata(self, snapshot_id, metadata): + """Update metadata for the snapshot.""" + put_body = json.dumps({'metadata': metadata}) + url = "snapshots/%s/metadata" % str(snapshot_id) + resp, body = self.put(url, put_body) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['metadata']) + + def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): + """Update metadata item for the snapshot.""" + put_body = json.dumps({'meta': meta_item}) + url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) + resp, body = self.put(url, put_body) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['meta']) + + def delete_snapshot_metadata_item(self, snapshot_id, id): + """Delete metadata item for the snapshot.""" + url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) + resp, body = self.delete(url) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body) + + def force_delete_snapshot(self, snapshot_id): + """Force Delete Snapshot.""" + post_body = json.dumps({'os-force_delete': {}}) + resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + +class SnapshotsClientJSON(BaseSnapshotsClientJSON): + """Client class to send CRUD Volume V1 API requests.""" diff --git a/neutron_lbaas/tests/tempest/lib/services/volume/json/volumes_client.py b/neutron_lbaas/tests/tempest/lib/services/volume/json/volumes_client.py new file mode 100644 index 000000000..3d218d8af --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/volume/json/volumes_client.py @@ -0,0 +1,351 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import time + +from six.moves.urllib import parse as urllib +from tempest_lib import exceptions as lib_exc + +from neutron_lbaas.tests.tempest.lib.common import service_client +from neutron_lbaas.tests.tempest.lib import exceptions + + +class BaseVolumesClientJSON(service_client.ServiceClient): + """ + Base client class to send CRUD Volume API requests to a Cinder endpoint + """ + + create_resp = 200 + + def __init__(self, auth_provider, service, region, + default_volume_size=1, **kwargs): + super(BaseVolumesClientJSON, self).__init__( + auth_provider, service, region, **kwargs) + self.default_volume_size = default_volume_size + + def get_attachment_from_volume(self, volume): + """Return the element 'attachment' from input volumes.""" + return volume['attachments'][0] + + def list_volumes(self, detail=False, params=None): + """List all the volumes created.""" + url = 'volumes' + if detail: + url += '/detail' + if params: + url += '?%s' % urllib.urlencode(params) + + resp, body = self.get(url) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBodyList(resp, body['volumes']) + + def show_volume(self, volume_id): + """Returns the details of a single volume.""" + url = "volumes/%s" % str(volume_id) + resp, body = self.get(url) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['volume']) + + def 
create_volume(self, size=None, **kwargs): + """ + Creates a new Volume. + size: Size of volume in GB. + Following optional keyword arguments are accepted: + display_name: Optional Volume Name(only for V1). + name: Optional Volume Name(only for V2). + metadata: A dictionary of values to be used as metadata. + volume_type: Optional Name of volume_type for the volume + snapshot_id: When specified the volume is created from this snapshot + imageRef: When specified the volume is created from this image + """ + if size is None: + size = self.default_volume_size + post_body = {'size': size} + post_body.update(kwargs) + post_body = json.dumps({'volume': post_body}) + resp, body = self.post('volumes', post_body) + body = json.loads(body) + self.expected_success(self.create_resp, resp.status) + return service_client.ResponseBody(resp, body['volume']) + + def update_volume(self, volume_id, **kwargs): + """Updates the Specified Volume.""" + put_body = json.dumps({'volume': kwargs}) + resp, body = self.put('volumes/%s' % volume_id, put_body) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['volume']) + + def delete_volume(self, volume_id): + """Deletes the Specified Volume.""" + resp, body = self.delete("volumes/%s" % str(volume_id)) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def upload_volume(self, volume_id, image_name, disk_format): + """Uploads a volume in Glance.""" + post_body = { + 'image_name': image_name, + 'disk_format': disk_format + } + post_body = json.dumps({'os-volume_upload_image': post_body}) + url = 'volumes/%s/action' % (volume_id) + resp, body = self.post(url, post_body) + body = json.loads(body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, + body['os-volume_upload_image']) + + def attach_volume(self, volume_id, instance_uuid, mountpoint): + """Attaches a volume to a given instance on a given 
mountpoint.""" + post_body = { + 'instance_uuid': instance_uuid, + 'mountpoint': mountpoint, + } + post_body = json.dumps({'os-attach': post_body}) + url = 'volumes/%s/action' % (volume_id) + resp, body = self.post(url, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def set_bootable_volume(self, volume_id, bootable): + """set a bootable flag for a volume - true or false.""" + post_body = {"bootable": bootable} + post_body = json.dumps({'os-set_bootable': post_body}) + url = 'volumes/%s/action' % (volume_id) + resp, body = self.post(url, post_body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body) + + def detach_volume(self, volume_id): + """Detaches a volume from an instance.""" + post_body = {} + post_body = json.dumps({'os-detach': post_body}) + url = 'volumes/%s/action' % (volume_id) + resp, body = self.post(url, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def reserve_volume(self, volume_id): + """Reserves a volume.""" + post_body = {} + post_body = json.dumps({'os-reserve': post_body}) + url = 'volumes/%s/action' % (volume_id) + resp, body = self.post(url, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def unreserve_volume(self, volume_id): + """Restore a reserved volume .""" + post_body = {} + post_body = json.dumps({'os-unreserve': post_body}) + url = 'volumes/%s/action' % (volume_id) + resp, body = self.post(url, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def wait_for_volume_status(self, volume_id, status): + """Waits for a Volume to reach a given status.""" + body = self.show_volume(volume_id) + volume_status = body['status'] + start = int(time.time()) + + while volume_status != status: + time.sleep(self.build_interval) + body = self.show_volume(volume_id) + volume_status = 
body['status'] + if volume_status == 'error': + raise exceptions.VolumeBuildErrorException(volume_id=volume_id) + + if int(time.time()) - start >= self.build_timeout: + message = ('Volume %s failed to reach %s status (current: %s) ' + 'within the required time ' + '(%s s).' % (volume_id, + status, + volume_status, + self.build_timeout)) + raise exceptions.TimeoutException(message) + + def is_resource_deleted(self, id): + try: + self.show_volume(id) + except lib_exc.NotFound: + return True + return False + + @property + def resource_type(self): + """Returns the primary type of resource this client works with.""" + return 'volume' + + def extend_volume(self, volume_id, extend_size): + """Extend a volume.""" + post_body = { + 'new_size': extend_size + } + post_body = json.dumps({'os-extend': post_body}) + url = 'volumes/%s/action' % (volume_id) + resp, body = self.post(url, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def reset_volume_status(self, volume_id, status): + """Reset the Specified Volume's Status.""" + post_body = json.dumps({'os-reset_status': {"status": status}}) + resp, body = self.post('volumes/%s/action' % volume_id, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def volume_begin_detaching(self, volume_id): + """Volume Begin Detaching.""" + # ref cinder/api/contrib/volume_actions.py#L158 + post_body = json.dumps({'os-begin_detaching': {}}) + resp, body = self.post('volumes/%s/action' % volume_id, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def volume_roll_detaching(self, volume_id): + """Volume Roll Detaching.""" + # cinder/api/contrib/volume_actions.py#L170 + post_body = json.dumps({'os-roll_detaching': {}}) + resp, body = self.post('volumes/%s/action' % volume_id, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def 
create_volume_transfer(self, vol_id, display_name=None): + """Create a volume transfer.""" + post_body = { + 'volume_id': vol_id + } + if display_name: + post_body['name'] = display_name + post_body = json.dumps({'transfer': post_body}) + resp, body = self.post('os-volume-transfer', post_body) + body = json.loads(body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body['transfer']) + + def show_volume_transfer(self, transfer_id): + """Returns the details of a volume transfer.""" + url = "os-volume-transfer/%s" % str(transfer_id) + resp, body = self.get(url) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['transfer']) + + def list_volume_transfers(self, params=None): + """List all the volume transfers created.""" + url = 'os-volume-transfer' + if params: + url += '?%s' % urllib.urlencode(params) + resp, body = self.get(url) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBodyList(resp, body['transfers']) + + def delete_volume_transfer(self, transfer_id): + """Delete a volume transfer.""" + resp, body = self.delete("os-volume-transfer/%s" % str(transfer_id)) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def accept_volume_transfer(self, transfer_id, transfer_auth_key): + """Accept a volume transfer.""" + post_body = { + 'auth_key': transfer_auth_key, + } + url = 'os-volume-transfer/%s/accept' % transfer_id + post_body = json.dumps({'accept': post_body}) + resp, body = self.post(url, post_body) + body = json.loads(body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body['transfer']) + + def update_volume_readonly(self, volume_id, readonly): + """Update the Specified Volume readonly.""" + post_body = { + 'readonly': readonly + } + post_body = json.dumps({'os-update_readonly_flag': post_body}) + url = 'volumes/%s/action' % 
(volume_id) + resp, body = self.post(url, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def force_delete_volume(self, volume_id): + """Force Delete Volume.""" + post_body = json.dumps({'os-force_delete': {}}) + resp, body = self.post('volumes/%s/action' % volume_id, post_body) + self.expected_success(202, resp.status) + return service_client.ResponseBody(resp, body) + + def create_volume_metadata(self, volume_id, metadata): + """Create metadata for the volume.""" + put_body = json.dumps({'metadata': metadata}) + url = "volumes/%s/metadata" % str(volume_id) + resp, body = self.post(url, put_body) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['metadata']) + + def show_volume_metadata(self, volume_id): + """Get metadata of the volume.""" + url = "volumes/%s/metadata" % str(volume_id) + resp, body = self.get(url) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['metadata']) + + def update_volume_metadata(self, volume_id, metadata): + """Update metadata for the volume.""" + put_body = json.dumps({'metadata': metadata}) + url = "volumes/%s/metadata" % str(volume_id) + resp, body = self.put(url, put_body) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['metadata']) + + def update_volume_metadata_item(self, volume_id, id, meta_item): + """Update metadata item for the volume.""" + put_body = json.dumps({'meta': meta_item}) + url = "volumes/%s/metadata/%s" % (str(volume_id), str(id)) + resp, body = self.put(url, put_body) + body = json.loads(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body['meta']) + + def delete_volume_metadata_item(self, volume_id, id): + """Delete metadata item for the volume.""" + url = "volumes/%s/metadata/%s" % (str(volume_id), str(id)) + resp, 
body = self.delete(url) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body) + + def retype_volume(self, volume_id, volume_type, **kwargs): + """Updates volume with new volume type.""" + post_body = {'new_type': volume_type} + post_body.update(kwargs) + post_body = json.dumps({'os-retype': post_body}) + resp, body = self.post('volumes/%s/action' % volume_id, post_body) + self.expected_success(202, resp.status) + + +class VolumesClientJSON(BaseVolumesClientJSON): + """ + Client class to send CRUD Volume V1 API requests to a Cinder endpoint + """ diff --git a/neutron_lbaas/tests/tempest/lib/services/volume/v2/__init__.py b/neutron_lbaas/tests/tempest/lib/services/volume/v2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron_lbaas/tests/tempest/lib/services/volume/v2/json/__init__.py b/neutron_lbaas/tests/tempest/lib/services/volume/v2/json/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron_lbaas/tests/tempest/lib/services/volume/v2/json/snapshots_client.py b/neutron_lbaas/tests/tempest/lib/services/volume/v2/json/snapshots_client.py new file mode 100644 index 000000000..1b9fb86bc --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/volume/v2/json/snapshots_client.py @@ -0,0 +1,19 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron_lbaas.tests.tempest.lib.services.volume.json import snapshots_client + + +class SnapshotsV2ClientJSON(snapshots_client.BaseSnapshotsClientJSON): + """Client class to send CRUD Volume V2 API requests.""" + api_version = "v2" + create_resp = 202 diff --git a/neutron_lbaas/tests/tempest/lib/services/volume/v2/json/volumes_client.py b/neutron_lbaas/tests/tempest/lib/services/volume/v2/json/volumes_client.py new file mode 100644 index 000000000..34330945a --- /dev/null +++ b/neutron_lbaas/tests/tempest/lib/services/volume/v2/json/volumes_client.py @@ -0,0 +1,24 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lbaas.tests.tempest.lib.services.volume.json import volumes_client + + +class VolumesV2ClientJSON(volumes_client.BaseVolumesClientJSON): + """ + Client class to send CRUD Volume V2 API requests to a Cinder endpoint + """ + api_version = "v2" + create_resp = 202 diff --git a/neutron_lbaas/tests/tempest/v2/scenario/base.py b/neutron_lbaas/tests/tempest/v2/scenario/base.py new file mode 100644 index 000000000..16ab9004e --- /dev/null +++ b/neutron_lbaas/tests/tempest/v2/scenario/base.py @@ -0,0 +1,364 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import tempfile +import time + +import six +from six.moves.urllib import request as urllib2 + +from neutron_lbaas.tests.tempest.lib.common import commands +from neutron_lbaas.tests.tempest.lib import config +from neutron_lbaas.tests.tempest.lib import exceptions +from neutron_lbaas.tests.tempest.lib.services.network import resources as \ + net_resources +from neutron_lbaas.tests.tempest.lib import test +from neutron_lbaas.tests.tempest.v2.scenario import manager + +config = config.CONF + + +class BaseTestCase(manager.NetworkScenarioTest): + + @classmethod + def skip_checks(cls): + super(BaseTestCase, cls).skip_checks() + cfg = config.network + if not test.is_extension_enabled('lbaasv2', 'network'): + msg = 'LBaaS Extension is not enabled' + raise cls.skipException(msg) + if not (cfg.tenant_networks_reachable or cfg.public_network_id): + msg = ('Either tenant_networks_reachable must be "true", or ' + 'public_network_id must be defined.') + raise cls.skipException(msg) + + @classmethod + def resource_setup(cls): + super(BaseTestCase, cls).resource_setup() + cls.servers_keypairs = {} + cls.members = [] + cls.floating_ips = {} + cls.servers_floating_ips = {} + cls.server_ips = {} + cls.port1 = 80 + cls.port2 = 88 + cls.num = 50 + + @classmethod + def resource_cleanup(cls): + super(BaseTestCase, cls).resource_cleanup() + + def _set_net_and_subnet(self): + """ + Query and set appropriate network and subnet attributes to be used + for the test. Existing tenant networks are used if they are found. 
+ The configured private network and associated subnet is used as a + fallback in absence of tenant networking. + """ + try: + tenant_net = self._list_networks(tenant_id=self.tenant_id)[0] + except IndexError: + tenant_net = None + + if tenant_net: + tenant_subnet = self._list_subnets(tenant_id=self.tenant_id)[0] + self.subnet = net_resources.DeletableSubnet( + client=self.network_client, + **tenant_subnet) + self.network = tenant_net + else: + self.network = self._get_network_by_name( + config.compute.fixed_network_name) + # We are assuming that the first subnet associated + # with the fixed network is the one we want. In the future, we + # should instead pull a subnet id from config, which is set by + # devstack/admin/etc. + subnet = self._list_subnets(network_id=self.network['id'])[0] + self.subnet = net_resources.AttributeDict(subnet) + + def _create_security_group_for_test(self): + self.security_group = self._create_security_group( + tenant_id=self.tenant_id) + self._create_security_group_rules_for_port(self.port1) + self._create_security_group_rules_for_port(self.port2) + + def _create_security_group_rules_for_port(self, port): + rule = { + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': port, + 'port_range_max': port, + } + self._create_security_group_rule( + secgroup=self.security_group, + tenant_id=self.tenant_id, + **rule) + + def _create_server(self, name): + keypair = self.create_keypair() + security_groups = [{'name': self.security_group['name']}] + create_kwargs = { + 'networks': [ + {'uuid': self.network['id']}, + ], + 'key_name': keypair['name'], + 'security_groups': security_groups, + } + net_name = self.network['name'] + server = self.create_server(name=name, create_kwargs=create_kwargs) + self.servers_keypairs[server['id']] = keypair + if (config.network.public_network_id and not + config.network.tenant_networks_reachable): + public_network_id = config.network.public_network_id + floating_ip = self.create_floating_ip( + server, 
public_network_id) + self.floating_ips[floating_ip] = server + self.server_ips[server['id']] = floating_ip.floating_ip_address + else: + self.server_ips[server['id']] =\ + server['addresses'][net_name][0]['addr'] + self.server_fixed_ips[server['id']] =\ + server['addresses'][net_name][0]['addr'] + self.assertTrue(self.servers_keypairs) + return server + + def _create_servers(self): + for count in range(2): + self._create_server(name=("server%s" % (count + 1))) + self.assertEqual(len(self.servers_keypairs), 2) + + def _start_servers(self): + """ + Start two backends + 1. SSH to the instance + 2. Start two http backends listening on ports 80 and 88 respectively + """ + for server_id, ip in six.iteritems(self.server_ips): + private_key = self.servers_keypairs[server_id]['private_key'] + server_name = self.servers_client.get_server(server_id)['name'] + username = config.scenario.ssh_user + ssh_client = self.get_remote_client( + server_or_ip=ip, + private_key=private_key) + + # Write a backend's response into a file + resp = ('echo -ne "HTTP/1.1 200 OK\r\nContent-Length: 7\r\n' + 'Connection: close\r\nContent-Type: text/html; ' + 'charset=UTF-8\r\n\r\n%s"; cat >/dev/null') + + with tempfile.NamedTemporaryFile() as script: + script.write(resp % server_name) + script.flush() + with tempfile.NamedTemporaryFile() as key: + key.write(private_key) + key.flush() + commands.copy_file_to_host(script.name, + "/tmp/script1", + ip, + username, key.name) + + # Start netcat + start_server = ('while true; do ' + 'sudo nc -ll -p %(port)s -e sh /tmp/%(script)s; ' + 'done > /dev/null &') + cmd = start_server % {'port': self.port1, + 'script': 'script1'} + ssh_client.exec_command(cmd) + + if len(self.server_ips) == 1: + with tempfile.NamedTemporaryFile() as script: + script.write(resp % 'server2') + script.flush() + with tempfile.NamedTemporaryFile() as key: + key.write(private_key) + key.flush() + commands.copy_file_to_host(script.name, + "/tmp/script2", ip, + username, key.name) + cmd = 
start_server % {'port': self.port2, + 'script': 'script2'} + ssh_client.exec_command(cmd) + + def _check_connection(self, check_ip, port=80): + def try_connect(ip, port): + try: + resp = urllib2.urlopen("http://{0}:{1}/".format(ip, port)) + if resp.getcode() == 200: + return True + return False + except IOError: + return False + except urllib2.HTTPError: + return False + timeout = config.compute.ping_timeout + start = time.time() + while not try_connect(check_ip, port): + if (time.time() - start) > timeout: + message = "Timed out trying to connect to %s" % check_ip + raise exceptions.TimeoutException(message) + + def _create_listener(self, load_balancer_id): + """Create a listener with HTTP protocol listening on port 80.""" + self.listener = self.listeners_client.create_listener( + loadbalancer_id=load_balancer_id, + protocol='HTTP', protocol_port=80) + self.assertTrue(self.listener) + return self.listener + + def _create_pool(self, listener_id): + """Create a pool with ROUND_ROBIN algorithm.""" + self.pool = self.pools_client.create_pool( + protocol='HTTP', + lb_algorithm='ROUND_ROBIN', + listener_id=listener_id) + self.assertTrue(self.pool) + return self.pool + + def _create_members(self, load_balancer_id=None, pool_id=None, + subnet_id=None): + """ + Create two members. + + In case there is only one server, create both members with the same ip + but with different ports to listen on. 
+ """ + for server_id, ip in six.iteritems(self.server_fixed_ips): + if len(self.server_fixed_ips) == 1: + member1 = self.members_client.create_member( + pool_id=pool_id, + address=ip, + protocol_port=self.port1, + subnet_id=subnet_id) + self._wait_for_load_balancer_status(load_balancer_id) + member2 = self.members_client.create_member( + pool_id=pool_id, + address=ip, + protocol_port=self.port2, + subnet_id=subnet_id) + self._wait_for_load_balancer_status(load_balancer_id) + self.members.extend([member1, member2]) + else: + member = self.members_client.create_member( + pool_id=pool_id, + address=ip, + protocol_port=self.port1, + subnet_id=subnet_id) + self._wait_for_load_balancer_status(load_balancer_id) + self.members.append(member) + self.assertTrue(self.members) + + def _assign_floating_ip_to_lb_vip(self, lb): + public_network_id = config.network.public_network_id + port_id = lb.vip_port_id + floating_ip = self.create_floating_ip(lb, public_network_id, + port_id=port_id) + self.floating_ips.setdefault(lb.id, []) + self.floating_ips[lb.id].append(floating_ip) + # Check for floating ip status before you check load-balancer + self.check_floating_ip_status(floating_ip, "ACTIVE") + + def _create_load_balancer(self): + self.create_lb_kwargs = {'tenant_id': self.tenant_id, + 'vip_subnet_id': self.subnet['id']} + self.load_balancer = self.load_balancers_client.create_load_balancer( + **self.create_lb_kwargs) + load_balancer_id = self.load_balancer['id'] + self._wait_for_load_balancer_status(load_balancer_id) + + listener = self._create_listener(load_balancer_id=load_balancer_id) + self._wait_for_load_balancer_status(load_balancer_id) + + self.pool = self._create_pool(listener_id=listener.get('id')) + self._wait_for_load_balancer_status(load_balancer_id) + + self._create_members(load_balancer_id=load_balancer_id, + pool_id=self.pool['id'], + subnet_id=self.subnet['id']) + + if (config.network.public_network_id and not + config.network.tenant_networks_reachable): + 
load_balancer = net_resources.AttributeDict(self.load_balancer) + self._assign_floating_ip_to_lb_vip(load_balancer) + self.vip_ip = self.floating_ips[ + load_balancer.id][0]['floating_ip_address'] + + else: + self.vip_ip = self.lb.vip_address + + # Currently the ovs-agent is not enforcing security groups on the + # vip port - see https://bugs.launchpad.net/neutron/+bug/1163569 + # However the linuxbridge-agent does, and it is necessary to add a + # security group with a rule that allows tcp port 80 to the vip port. + self.network_client.update_port( + self.load_balancer.get('vip_port_id'), + security_groups=[self.security_group.id]) + + def _wait_for_load_balancer_status(self, load_balancer_id, + provisioning_status='ACTIVE', + operating_status='ONLINE'): + interval_time = 10 + timeout = 300 + end_time = time.time() + timeout + while time.time() < end_time: + lb = self.load_balancers_client.get_load_balancer(load_balancer_id) + if (lb.get('provisioning_status') == provisioning_status and + lb.get('operating_status') == operating_status): + break + elif (lb.get('provisioning_status') == 'ERROR' or + lb.get('operating_status') == 'ERROR'): + raise Exception( + _("Wait for load balancer for load balancer: {lb_id} " + "ran for {timeout} seconds and an ERROR was encountered " + "with provisioning status: {provisioning_status} and " + "operating status: {operating_status}").format( + timeout=timeout, + lb_id=lb.get('id'), + provisioning_status=provisioning_status, + operating_status=operating_status)) + time.sleep(interval_time) + else: + raise Exception( + _("Wait for load balancer ran for {timeout} seconds and did " + "not observe {lb_id} reach {provisioning_status} " + "provisioning status and {operating_status} " + "operating status.").format( + timeout=timeout, + lb_id=lb.get('id'), + provisioning_status=provisioning_status, + operating_status=operating_status)) + return lb + + def _check_load_balancing(self): + """ + 1. 
Send NUM requests on the floating ip associated with the VIP + 2. Check that the requests are shared between the two servers + """ + + self._check_connection(self.vip_ip) + self._send_requests(self.vip_ip, ["server1", "server2"]) + + def _send_requests(self, vip_ip, servers): + counters = dict.fromkeys(servers, 0) + for i in range(self.num): + try: + server = urllib2.urlopen("http://{0}/".format(vip_ip)).read() + counters[server] += 1 + # HTTP exception means fail of server, so don't increase counter + # of success and continue connection tries + except urllib2.HTTPError: + continue + # Assert that each member of the pool gets balanced at least once + for member, counter in six.iteritems(counters): + self.assertGreater(counter, 0, 'Member %s never balanced' % member) diff --git a/neutron_lbaas/tests/tempest/v2/scenario/manager.py b/neutron_lbaas/tests/tempest/v2/scenario/manager.py new file mode 100644 index 000000000..b5b71a892 --- /dev/null +++ b/neutron_lbaas/tests/tempest/v2/scenario/manager.py @@ -0,0 +1,1428 @@ +# Copyright 2012 OpenStack Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import subprocess + +import netaddr +from neutron.i18n import _LI, _LW, _LE +from oslo_log import log +import six +from tempest_lib.common.utils import data_utils +from tempest_lib.common.utils import misc as misc_utils +from tempest_lib import exceptions as lib_exc +from tempest_lib import exceptions + +from neutron_lbaas.tests.tempest.lib.common import fixed_network +from neutron_lbaas.tests.tempest.lib.common.utils.linux import remote_client +from neutron_lbaas.tests.tempest.lib import config +from neutron_lbaas.tests.tempest.lib.services.network import resources as \ + net_resources +from neutron_lbaas.tests.tempest.lib import test + +CONF = config.CONF + +LOG = log.getLogger(__name__) + + +class ScenarioTest(test.BaseTestCase): + """Base class for scenario tests. Uses tempest own clients. """ + + credentials = ['primary'] + + @classmethod + def setup_clients(cls): + super(ScenarioTest, cls).setup_clients() + # Clients (in alphabetical order) + cls.flavors_client = cls.manager.flavors_client + cls.floating_ips_client = cls.manager.floating_ips_client + # Glance image client v1 + cls.image_client = cls.manager.image_client + # Compute image client + cls.images_client = cls.manager.images_client + cls.keypairs_client = cls.manager.keypairs_client + # Nova security groups client + cls.security_groups_client = cls.manager.security_groups_client + cls.servers_client = cls.manager.servers_client + cls.volumes_client = cls.manager.volumes_client + cls.snapshots_client = cls.manager.snapshots_client + cls.interface_client = cls.manager.interfaces_client + # Neutron network client + cls.network_client = cls.manager.network_client + # Heat client + cls.orchestration_client = cls.manager.orchestration_client + + # ## Methods to handle sync and async deletes + + def setUp(self): + super(ScenarioTest, self).setUp() + self.cleanup_waits = [] + # NOTE(mtreinish) This is safe to do in setUp instead of setUp class + # because scenario tests in the same test class should not 
share + # resources. If resources were shared between test cases then it + # should be a single scenario test instead of multiples. + + # NOTE(yfried): this list is cleaned at the end of test_methods and + # not at the end of the class + self.addCleanup(self._wait_for_cleanups) + + def delete_wrapper(self, delete_thing, *args, **kwargs): + """Ignores NotFound exceptions for delete operations. + + @param delete_thing: delete method of a resource. method will be + executed as delete_thing(*args, **kwargs) + + """ + try: + # Tempest clients return dicts, so there is no common delete + # method available. Using a callable instead + delete_thing(*args, **kwargs) + except lib_exc.NotFound: + # If the resource is already missing, mission accomplished. + pass + + def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param, + cleanup_callable, cleanup_args=None, + cleanup_kwargs=None, ignore_error=True): + """Adds wait for async resource deletion at the end of cleanups + + @param waiter_callable: callable to wait for the resource to delete + @param thing_id: the id of the resource to be cleaned-up + @param thing_id_param: the name of the id param in the waiter + @param cleanup_callable: method to load pass to self.addCleanup with + the following *cleanup_args, **cleanup_kwargs. + usually a delete method. + """ + if cleanup_args is None: + cleanup_args = [] + if cleanup_kwargs is None: + cleanup_kwargs = {} + self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs) + wait_dict = { + 'waiter_callable': waiter_callable, + thing_id_param: thing_id + } + self.cleanup_waits.append(wait_dict) + + def _wait_for_cleanups(self): + """To handle async delete actions, a list of waits is added + which will be iterated over as the last step of clearing the + cleanup queue. That way all the delete calls are made up front + and the tests won't succeed unless the deletes are eventually + successful. 
This is the same basic approach used in the api tests to + limit cleanup execution time except here it is multi-resource, + because of the nature of the scenario tests. + """ + for wait in self.cleanup_waits: + waiter_callable = wait.pop('waiter_callable') + waiter_callable(**wait) + + # ## Test functions library + # + # The create_[resource] functions only return body and discard the + # resp part which is not used in scenario tests + + def create_keypair(self, client=None): + if not client: + client = self.keypairs_client + name = data_utils.rand_name(self.__class__.__name__) + # We don't need to create a keypair by pubkey in scenario + body = client.create_keypair(name) + self.addCleanup(client.delete_keypair, name) + return body + + def create_server(self, name=None, image=None, flavor=None, + wait_on_boot=True, wait_on_delete=True, + create_kwargs=None): + """Creates VM instance. + + @param image: image from which to create the instance + @param wait_on_boot: wait for status ACTIVE before continue + @param wait_on_delete: force synchronous delete on cleanup + @param create_kwargs: additional details for instance creation + @return: server dict + """ + if name is None: + name = data_utils.rand_name(self.__class__.__name__) + if image is None: + image = CONF.compute.image_ref + if flavor is None: + flavor = CONF.compute.flavor_ref + if create_kwargs is None: + create_kwargs = {} + network = self.get_tenant_network() + create_kwargs = fixed_network.set_networks_kwarg(network, + create_kwargs) + + LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)", + name, image, flavor) + server = self.servers_client.create_server(name, image, flavor, + **create_kwargs) + if wait_on_delete: + self.addCleanup(self.servers_client.wait_for_server_termination, + server['id']) + self.addCleanup_with_wait( + waiter_callable=self.servers_client.wait_for_server_termination, + thing_id=server['id'], thing_id_param='server_id', + cleanup_callable=self.delete_wrapper, + 
cleanup_args=[self.servers_client.delete_server, server['id']]) + if wait_on_boot: + self.servers_client.wait_for_server_status(server_id=server['id'], + status='ACTIVE') + # The instance retrieved on creation is missing network + # details, necessitating retrieval after it becomes active to + # ensure correct details. + server = self.servers_client.get_server(server['id']) + self.assertEqual(server['name'], name) + return server + + def create_volume(self, size=None, name=None, snapshot_id=None, + imageRef=None, volume_type=None, wait_on_delete=True): + if name is None: + name = data_utils.rand_name(self.__class__.__name__) + volume = self.volumes_client.create_volume( + size=size, display_name=name, snapshot_id=snapshot_id, + imageRef=imageRef, volume_type=volume_type) + + if wait_on_delete: + self.addCleanup(self.volumes_client.wait_for_resource_deletion, + volume['id']) + self.addCleanup(self.delete_wrapper, + self.volumes_client.delete_volume, volume['id']) + else: + self.addCleanup_with_wait( + waiter_callable=self.volumes_client.wait_for_resource_deletion, + thing_id=volume['id'], thing_id_param='id', + cleanup_callable=self.delete_wrapper, + cleanup_args=[self.volumes_client.delete_volume, volume['id']]) + + self.assertEqual(name, volume['display_name']) + self.volumes_client.wait_for_volume_status(volume['id'], 'available') + # The volume retrieved on creation has a non-up-to-date status. + # Retrieval after it becomes active ensures correct details. + volume = self.volumes_client.show_volume(volume['id']) + return volume + + def _create_loginable_secgroup_rule(self, secgroup_id=None): + _client = self.security_groups_client + if secgroup_id is None: + sgs = _client.list_security_groups() + for sg in sgs: + if sg['name'] == 'default': + secgroup_id = sg['id'] + + # These rules are intended to permit inbound ssh and icmp + # traffic from all sources, so no group_id is provided. 
+ # Setting a group_id would only permit traffic from ports + # belonging to the same security group. + rulesets = [ + { + # ssh + 'ip_proto': 'tcp', + 'from_port': 22, + 'to_port': 22, + 'cidr': '0.0.0.0/0', + }, + { + # ping + 'ip_proto': 'icmp', + 'from_port': -1, + 'to_port': -1, + 'cidr': '0.0.0.0/0', + } + ] + rules = list() + for ruleset in rulesets: + sg_rule = _client.create_security_group_rule(secgroup_id, + **ruleset) + self.addCleanup(self.delete_wrapper, + _client.delete_security_group_rule, + sg_rule['id']) + rules.append(sg_rule) + return rules + + def _create_security_group(self): + # Create security group + sg_name = data_utils.rand_name(self.__class__.__name__) + sg_desc = sg_name + " description" + secgroup = self.security_groups_client.create_security_group( + sg_name, sg_desc) + self.assertEqual(secgroup['name'], sg_name) + self.assertEqual(secgroup['description'], sg_desc) + self.addCleanup(self.delete_wrapper, + self.security_groups_client.delete_security_group, + secgroup['id']) + + # Add rules to the security group + self._create_loginable_secgroup_rule(secgroup['id']) + + return secgroup + + def get_remote_client(self, server_or_ip, username=None, private_key=None, + log_console_of_servers=None): + """Get a SSH client to a remote server + + @param server_or_ip a server object as returned by Tempest compute + client or an IP address to connect to + @param username name of the Linux account on the remote server + @param private_key the SSH private key to use + @param log_console_of_servers a list of server objects. 
Each server + in the list will have its console printed in the logs in case the + SSH connection failed to be established + @return a RemoteClient object + """ + if isinstance(server_or_ip, six.string_types): + ip = server_or_ip + else: + addrs = server_or_ip['addresses'][CONF.compute.network_for_ssh] + try: + ip = (addr['addr'] for addr in addrs if + netaddr.valid_ipv4(addr['addr'])).next() + except StopIteration: + raise lib_exc.NotFound("No IPv4 addresses to use for SSH to " + "remote server.") + + if username is None: + username = CONF.scenario.ssh_user + # Set this with 'keypair' or others to log in with keypair or + # username/password. + if CONF.compute.ssh_auth_method == 'keypair': + password = None + if private_key is None: + private_key = self.keypair['private_key'] + else: + password = CONF.compute.image_ssh_password + private_key = None + linux_client = remote_client.RemoteClient(ip, username, + pkey=private_key, + password=password) + try: + linux_client.validate_authentication() + except Exception as e: + message = ('Initializing SSH connection to %(ip)s failed. ' + 'Error: %(error)s' % {'ip': ip, 'error': e}) + caller = misc_utils.find_test_caller() + if caller: + message = '(%s) %s' % (caller, message) + LOG.exception(message) + # If we don't explicitly set for which servers we want to + # log the console output then all the servers will be logged. 
+ # See the definition of _log_console_output() + self._log_console_output(log_console_of_servers) + raise + + return linux_client + + def _image_create(self, name, fmt, path, + disk_format=None, properties=None): + if properties is None: + properties = {} + name = data_utils.rand_name('%s-' % name) + image_file = open(path, 'rb') + self.addCleanup(image_file.close) + params = { + 'name': name, + 'container_format': fmt, + 'disk_format': disk_format or fmt, + 'is_public': 'False', + } + params['properties'] = properties + image = self.image_client.create_image(**params) + self.addCleanup(self.image_client.delete_image, image['id']) + self.assertEqual("queued", image['status']) + self.image_client.update_image(image['id'], data=image_file) + return image['id'] + + def glance_image_create(self): + img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file + aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file + ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file + ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file + img_container_format = CONF.scenario.img_container_format + img_disk_format = CONF.scenario.img_disk_format + img_properties = CONF.scenario.img_properties + LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, " + "properties: %s, ami: %s, ari: %s, aki: %s" % + (img_path, img_container_format, img_disk_format, + img_properties, ami_img_path, ari_img_path, aki_img_path)) + try: + self.image = self._image_create('scenario-img', + img_container_format, + img_path, + disk_format=img_disk_format, + properties=img_properties) + except IOError: + LOG.debug("A qcow2 image was not found. 
Try to get a uec image.") + kernel = self._image_create('scenario-aki', 'aki', aki_img_path) + ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path) + properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk} + self.image = self._image_create('scenario-ami', 'ami', + path=ami_img_path, + properties=properties) + LOG.debug("image:%s" % self.image) + + def _log_console_output(self, servers=None): + if not CONF.compute_feature_enabled.console_output: + LOG.debug('Console output not supported, cannot log') + return + if not servers: + servers = self.servers_client.list_servers() + servers = servers['servers'] + for server in servers: + console_output = self.servers_client.get_console_output( + server['id'], length=None).data + LOG.debug('Console output for %s\nbody=\n%s', + server['id'], console_output) + + def _log_net_info(self, exc): + # network debug is called as part of ssh init + if not isinstance(exc, lib_exc.SSHTimeout): + LOG.debug('Network information on a devstack host') + + def create_server_snapshot(self, server, name=None): + # Glance client + _image_client = self.image_client + # Compute client + _images_client = self.images_client + if name is None: + name = data_utils.rand_name('scenario-snapshot') + LOG.debug("Creating a snapshot image for server: %s", server['name']) + image = _images_client.create_image(server['id'], name) + image_id = image.response['location'].split('images/')[1] + _image_client.wait_for_image_status(image_id, 'active') + self.addCleanup_with_wait( + waiter_callable=_image_client.wait_for_resource_deletion, + thing_id=image_id, thing_id_param='id', + cleanup_callable=self.delete_wrapper, + cleanup_args=[_image_client.delete_image, image_id]) + snapshot_image = _image_client.get_image_meta(image_id) + image_name = snapshot_image['name'] + self.assertEqual(name, image_name) + LOG.debug("Created snapshot image %s for server %s", + image_name, server['name']) + return snapshot_image + + def nova_volume_attach(self): + 
volume = self.servers_client.attach_volume( + self.server['id'], self.volume['id'], '/dev/%s' + % CONF.compute.volume_device_name) + self.assertEqual(self.volume['id'], volume['id']) + self.volumes_client.wait_for_volume_status(volume['id'], 'in-use') + # Refresh the volume after the attachment + self.volume = self.volumes_client.show_volume(volume['id']) + + def nova_volume_detach(self): + self.servers_client.detach_volume(self.server['id'], self.volume['id']) + self.volumes_client.wait_for_volume_status(self.volume['id'], + 'available') + + volume = self.volumes_client.show_volume(self.volume['id']) + self.assertEqual('available', volume['status']) + + def rebuild_server(self, server_id, image=None, + preserve_ephemeral=False, wait=True, + rebuild_kwargs=None): + if image is None: + image = CONF.compute.image_ref + + rebuild_kwargs = rebuild_kwargs or {} + + LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)", + server_id, image, preserve_ephemeral) + self.servers_client.rebuild(server_id=server_id, image_ref=image, + preserve_ephemeral=preserve_ephemeral, + **rebuild_kwargs) + if wait: + self.servers_client.wait_for_server_status(server_id, 'ACTIVE') + + def ping_ip_address(self, ip_address, should_succeed=True, + ping_timeout=None): + timeout = ping_timeout or CONF.compute.ping_timeout + cmd = ['ping', '-c1', '-w1', ip_address] + + def ping(): + proc = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + proc.communicate() + return (proc.returncode == 0) == should_succeed + + return test.call_until_true(ping, timeout, 1) + + def check_vm_connectivity(self, ip_address, + username=None, + private_key=None, + should_connect=True): + """ + :param ip_address: server to test against + :param username: server's ssh username + :param private_key: server's ssh private key to be used + :param should_connect: True/False indicates positive/negative test + positive - attempt ping and ssh + negative - attempt ping and fail if succeed + 
+ :raises: AssertError if the result of the connectivity check does + not match the value of the should_connect param + """ + if should_connect: + msg = "Timed out waiting for %s to become reachable" % ip_address + else: + msg = "ip address %s is reachable" % ip_address + self.assertTrue(self.ping_ip_address(ip_address, + should_succeed=should_connect), + msg=msg) + if should_connect: + # no need to check ssh for negative connectivity + self.get_remote_client(ip_address, username, private_key) + + def check_public_network_connectivity(self, ip_address, username, + private_key, should_connect=True, + msg=None, servers=None): + # The target login is assumed to have been configured for + # key-based authentication by cloud-init. + LOG.debug('checking network connections to IP %s with user: %s' % + (ip_address, username)) + try: + self.check_vm_connectivity(ip_address, + username, + private_key, + should_connect=should_connect) + except Exception: + ex_msg = 'Public network connectivity check failed' + if msg: + ex_msg += ": " + msg + LOG.exception(ex_msg) + self._log_console_output(servers) + raise + + def create_floating_ip(self, thing, pool_name=None): + """Creates a floating IP and associates to a server using + Nova clients + """ + + floating_ip = self.floating_ips_client.create_floating_ip(pool_name) + self.addCleanup(self.delete_wrapper, + self.floating_ips_client.delete_floating_ip, + floating_ip['id']) + self.floating_ips_client.associate_floating_ip_to_server( + floating_ip['ip'], thing['id']) + return floating_ip + + +class NetworkScenarioTest(ScenarioTest): + """Base class for network scenario tests. + This class provide helpers for network scenario tests, using the neutron + API. Helpers from ancestor which use the nova network API are overridden + with the neutron API. + + This Class also enforces using Neutron instead of novanetwork. 
+ Subclassed tests will be skipped if Neutron is not enabled + + """ + + credentials = ['primary', 'admin'] + + @classmethod + def skip_checks(cls): + super(NetworkScenarioTest, cls).skip_checks() + if not CONF.service_available.neutron: + raise cls.skipException('Neutron not available') + + @classmethod + def resource_setup(cls): + super(NetworkScenarioTest, cls).resource_setup() + cls.tenant_id = cls.manager.identity_client.tenant_id + + def _create_network(self, client=None, tenant_id=None, + namestart='network-smoke-'): + if not client: + client = self.network_client + if not tenant_id: + tenant_id = client.tenant_id + name = data_utils.rand_name(namestart) + result = client.create_network(name=name, tenant_id=tenant_id) + network = net_resources.DeletableNetwork(client=client, + **result['network']) + self.assertEqual(network.name, name) + self.addCleanup(self.delete_wrapper, network.delete) + return network + + def _list_networks(self, *args, **kwargs): + """List networks using admin creds """ + return self._admin_lister('networks')(*args, **kwargs) + + def _list_subnets(self, *args, **kwargs): + """List subnets using admin creds """ + return self._admin_lister('subnets')(*args, **kwargs) + + def _list_routers(self, *args, **kwargs): + """List routers using admin creds """ + return self._admin_lister('routers')(*args, **kwargs) + + def _list_ports(self, *args, **kwargs): + """List ports using admin creds """ + return self._admin_lister('ports')(*args, **kwargs) + + def _admin_lister(self, resource_type): + def temp(*args, **kwargs): + temp_method = self.admin_manager.network_client.__getattr__( + 'list_%s' % resource_type) + resource_list = temp_method(*args, **kwargs) + return resource_list[resource_type] + return temp + + def _create_subnet(self, network, client=None, namestart='subnet-smoke', + **kwargs): + """ + Create a subnet for the given network within the cidr block + configured for tenant networks. 
+ """ + if not client: + client = self.network_client + + def cidr_in_use(cidr, tenant_id): + """ + :return True if subnet with cidr already exist in tenant + False else + """ + cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr) + return len(cidr_in_use) != 0 + + ip_version = kwargs.pop('ip_version', 4) + + if ip_version == 6: + tenant_cidr = netaddr.IPNetwork( + CONF.network.tenant_network_v6_cidr) + num_bits = CONF.network.tenant_network_v6_mask_bits + else: + tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr) + num_bits = CONF.network.tenant_network_mask_bits + + result = None + str_cidr = None + # Repeatedly attempt subnet creation with sequential cidr + # blocks until an unallocated block is found. + for subnet_cidr in tenant_cidr.subnet(num_bits): + str_cidr = str(subnet_cidr) + if cidr_in_use(str_cidr, tenant_id=network.tenant_id): + continue + + subnet = dict( + name=data_utils.rand_name(namestart), + network_id=network.id, + tenant_id=network.tenant_id, + cidr=str_cidr, + ip_version=ip_version, + **kwargs + ) + try: + result = client.create_subnet(**subnet) + break + except lib_exc.Conflict as e: + is_overlapping_cidr = 'overlaps with another subnet' in str(e) + if not is_overlapping_cidr: + raise + self.assertIsNotNone(result, 'Unable to allocate tenant network') + subnet = net_resources.DeletableSubnet(client=client, + **result['subnet']) + self.assertEqual(subnet.cidr, str_cidr) + self.addCleanup(self.delete_wrapper, subnet.delete) + return subnet + + def _create_port(self, network_id, client=None, namestart='port-quotatest', + **kwargs): + if not client: + client = self.network_client + name = data_utils.rand_name(namestart) + result = client.create_port( + name=name, + network_id=network_id, + **kwargs) + self.assertIsNotNone(result, 'Unable to allocate port') + port = net_resources.DeletablePort(client=client, + **result['port']) + self.addCleanup(self.delete_wrapper, port.delete) + return port + + def 
_get_server_port_id_and_ip4(self, server, ip_addr=None): + ports = self._list_ports(device_id=server['id'], + fixed_ip=ip_addr) + self.assertEqual(len(ports), 1, + "Unable to determine which port to target.") + # it might happen here that this port has more then one ip address + # as in case of dual stack- when this port is created on 2 subnets + for ip46 in ports[0]['fixed_ips']: + ip = ip46['ip_address'] + if netaddr.valid_ipv4(ip): + return ports[0]['id'], ip + + def _get_network_by_name(self, network_name): + net = self._list_networks(name=network_name) + self.assertNotEqual(len(net), 0, + "Unable to get network by name: %s" % network_name) + return net_resources.AttributeDict(net[0]) + + def create_floating_ip(self, thing, external_network_id=None, + port_id=None, client=None): + """Creates a floating IP and associates to a resource/port using + Neutron client + """ + if not external_network_id: + external_network_id = CONF.network.public_network_id + if not client: + client = self.network_client + if not port_id: + port_id, ip4 = self._get_server_port_id_and_ip4(thing) + else: + ip4 = None + result = client.create_floatingip( + floating_network_id=external_network_id, + port_id=port_id, + tenant_id=thing['tenant_id'], + fixed_ip_address=ip4 + ) + floating_ip = net_resources.DeletableFloatingIp( + client=client, + **result['floatingip']) + self.addCleanup(self.delete_wrapper, floating_ip.delete) + return floating_ip + + def _associate_floating_ip(self, floating_ip, server): + port_id, _ = self._get_server_port_id_and_ip4(server) + floating_ip.update(port_id=port_id) + self.assertEqual(port_id, floating_ip.port_id) + return floating_ip + + def _disassociate_floating_ip(self, floating_ip): + """ + :param floating_ip: type DeletableFloatingIp + """ + floating_ip.update(port_id=None) + self.assertIsNone(floating_ip.port_id) + return floating_ip + + def check_floating_ip_status(self, floating_ip, status): + """Verifies floatingip reaches the given status + + :param 
floating_ip: net_resources.DeletableFloatingIp floating IP to + to check status + :param status: target status + :raises: AssertionError if status doesn't match + """ + def refresh(): + floating_ip.refresh() + return status == floating_ip.status + + test.call_until_true(refresh, + CONF.network.build_timeout, + CONF.network.build_interval) + self.assertEqual(status, floating_ip.status, + message="FloatingIP: {fp} is at status: {cst}. " + "failed to reach status: {st}" + .format(fp=floating_ip, cst=floating_ip.status, + st=status)) + LOG.info(_LI( + "FloatingIP: {fp} is at status: {st}" + ).format(fp=floating_ip, st=status)) + + def _check_tenant_network_connectivity(self, server, + username, + private_key, + should_connect=True, + servers_for_debug=None): + if not CONF.network.tenant_networks_reachable: + msg = 'Tenant networks not configured to be reachable.' + LOG.info(msg) + return + # The target login is assumed to have been configured for + # key-based authentication by cloud-init. + try: + for net_name, ip_addresses in six.iteritems(server['addresses']): + for ip_address in ip_addresses: + self.check_vm_connectivity(ip_address['addr'], + username, + private_key, + should_connect=should_connect) + except Exception as e: + LOG.exception(_LE('Tenant network connectivity check failed')) + self._log_console_output(servers_for_debug) + self._log_net_info(e) + raise + + def _check_remote_connectivity(self, source, dest, should_succeed=True): + """ + check ping server via source ssh connection + + :param source: RemoteClient: an ssh connection from which to ping + :param dest: and IP to ping against + :param should_succeed: boolean should ping succeed or not + :returns: boolean -- should_succeed == ping + :returns: ping is false if ping failed + """ + def ping_remote(): + try: + source.ping_host(dest) + except lib_exc.SSHExecCommandFailed: + LOG.warn(_LW( + "Failed to ping IP {des} via a ssh connection from: {src}" + ).format(des=dest, src=source.ssh_client.host)) + 
return not should_succeed + return should_succeed + + return test.call_until_true(ping_remote, + CONF.compute.ping_timeout, 1) + + def _create_security_group(self, client=None, tenant_id=None, + namestart='secgroup-smoke'): + if client is None: + client = self.network_client + if tenant_id is None: + tenant_id = client.tenant_id + secgroup = self._create_empty_security_group(namestart=namestart, + client=client, + tenant_id=tenant_id) + + # Add rules to the security group + rules = self._create_loginable_secgroup_rule(client=client, + secgroup=secgroup) + for rule in rules: + self.assertEqual(tenant_id, rule.tenant_id) + self.assertEqual(secgroup.id, rule.security_group_id) + return secgroup + + def _create_empty_security_group(self, client=None, tenant_id=None, + namestart='secgroup-smoke'): + """Create a security group without rules. + + Default rules will be created: + - IPv4 egress to any + - IPv6 egress to any + + :param tenant_id: secgroup will be created in this tenant + :returns: DeletableSecurityGroup -- containing the secgroup created + """ + if client is None: + client = self.network_client + if not tenant_id: + tenant_id = client.tenant_id + sg_name = data_utils.rand_name(namestart) + sg_desc = sg_name + " description" + sg_dict = dict(name=sg_name, + description=sg_desc) + sg_dict['tenant_id'] = tenant_id + result = client.create_security_group(**sg_dict) + secgroup = net_resources.DeletableSecurityGroup( + client=client, + **result['security_group'] + ) + self.assertEqual(secgroup.name, sg_name) + self.assertEqual(tenant_id, secgroup.tenant_id) + self.assertEqual(secgroup.description, sg_desc) + self.addCleanup(self.delete_wrapper, secgroup.delete) + return secgroup + + def _default_security_group(self, client=None, tenant_id=None): + """Get default secgroup for given tenant_id. 
+ + :returns: DeletableSecurityGroup -- default secgroup for given tenant + """ + if client is None: + client = self.network_client + if not tenant_id: + tenant_id = client.tenant_id + sgs = [ + sg for sg in client.list_security_groups().values()[0] + if sg['tenant_id'] == tenant_id and sg['name'] == 'default' + ] + msg = "No default security group for tenant %s." % (tenant_id) + self.assertTrue(len(sgs) > 0, msg) + return net_resources.DeletableSecurityGroup(client=client, + **sgs[0]) + + def _create_security_group_rule(self, secgroup=None, client=None, + tenant_id=None, **kwargs): + """Create a rule from a dictionary of rule parameters. + + Create a rule in a secgroup. if secgroup not defined will search for + default secgroup in tenant_id. + + :param secgroup: type DeletableSecurityGroup. + :param tenant_id: if secgroup not passed -- the tenant in which to + search for default secgroup + :param kwargs: a dictionary containing rule parameters: + for example, to allow incoming ssh: + rule = { + direction: 'ingress' + protocol:'tcp', + port_range_min: 22, + port_range_max: 22 + } + """ + if client is None: + client = self.network_client + if not tenant_id: + tenant_id = client.tenant_id + if secgroup is None: + secgroup = self._default_security_group(client=client, + tenant_id=tenant_id) + + ruleset = dict(security_group_id=secgroup.id, + tenant_id=secgroup.tenant_id) + ruleset.update(kwargs) + + sg_rule = client.create_security_group_rule(**ruleset) + sg_rule = net_resources.DeletableSecurityGroupRule( + client=client, + **sg_rule['security_group_rule'] + ) + self.addCleanup(self.delete_wrapper, sg_rule.delete) + self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id) + self.assertEqual(secgroup.id, sg_rule.security_group_id) + + return sg_rule + + def _create_loginable_secgroup_rule(self, client=None, secgroup=None): + """These rules are intended to permit inbound ssh and icmp + traffic from all sources, so no group_id is provided. 
+ Setting a group_id would only permit traffic from ports + belonging to the same security group. + """ + + if client is None: + client = self.network_client + rules = [] + rulesets = [ + dict( + # ssh + protocol='tcp', + port_range_min=22, + port_range_max=22, + ), + dict( + # ping + protocol='icmp', + ), + dict( + # ipv6-icmp for ping6 + protocol='icmp', + ethertype='IPv6', + ) + ] + for ruleset in rulesets: + for r_direction in ['ingress', 'egress']: + ruleset['direction'] = r_direction + try: + sg_rule = self._create_security_group_rule( + client=client, secgroup=secgroup, **ruleset) + except lib_exc.Conflict as ex: + # if rule already exist - skip rule and continue + msg = 'Security group rule already exists' + if msg not in ex._error_string: + raise ex + else: + self.assertEqual(r_direction, sg_rule.direction) + rules.append(sg_rule) + + return rules + + def _create_pool(self, lb_method, protocol, subnet_id): + """Wrapper utility that returns a test pool.""" + client = self.network_client + name = data_utils.rand_name('pool') + resp_pool = client.create_pool(protocol=protocol, name=name, + subnet_id=subnet_id, + lb_method=lb_method) + pool = net_resources.DeletablePool(client=client, **resp_pool['pool']) + self.assertEqual(pool['name'], name) + self.addCleanup(self.delete_wrapper, pool.delete) + return pool + + def _create_member(self, address, protocol_port, pool_id): + """Wrapper utility that returns a test member.""" + client = self.network_client + resp_member = client.create_member(protocol_port=protocol_port, + pool_id=pool_id, + address=address) + member = net_resources.DeletableMember(client=client, + **resp_member['member']) + self.addCleanup(self.delete_wrapper, member.delete) + return member + + def _create_vip(self, protocol, protocol_port, subnet_id, pool_id): + """Wrapper utility that returns a test vip.""" + client = self.network_client + name = data_utils.rand_name('vip') + resp_vip = client.create_vip(protocol=protocol, name=name, + 
subnet_id=subnet_id, pool_id=pool_id, + protocol_port=protocol_port) + vip = net_resources.DeletableVip(client=client, **resp_vip['vip']) + self.assertEqual(vip['name'], name) + self.addCleanup(self.delete_wrapper, vip.delete) + return vip + + def _ssh_to_server(self, server, private_key): + ssh_login = CONF.compute.image_ssh_user + return self.get_remote_client(server, + username=ssh_login, + private_key=private_key) + + def _get_router(self, client=None, tenant_id=None): + """Retrieve a router for the given tenant id. + + If a public router has been configured, it will be returned. + + If a public router has not been configured, but a public + network has, a tenant router will be created and returned that + routes traffic to the public network. + """ + if not client: + client = self.network_client + if not tenant_id: + tenant_id = client.tenant_id + router_id = CONF.network.public_router_id + network_id = CONF.network.public_network_id + if router_id: + body = client.show_router(router_id) + return net_resources.AttributeDict(**body['router']) + elif network_id: + router = self._create_router(client, tenant_id) + router.set_gateway(network_id) + return router + else: + raise Exception("Neither of 'public_router_id' or " + "'public_network_id' has been defined.") + + def _create_router(self, client=None, tenant_id=None, + namestart='router-smoke'): + if not client: + client = self.network_client + if not tenant_id: + tenant_id = client.tenant_id + name = data_utils.rand_name(namestart) + result = client.create_router(name=name, + admin_state_up=True, + tenant_id=tenant_id) + router = net_resources.DeletableRouter(client=client, + **result['router']) + self.assertEqual(router.name, name) + self.addCleanup(self.delete_wrapper, router.delete) + return router + + def _update_router_admin_state(self, router, admin_state_up): + router.update(admin_state_up=admin_state_up) + self.assertEqual(admin_state_up, router.admin_state_up) + + def create_networks(self, 
client=None, tenant_id=None, + dns_nameservers=None): + """Create a network with a subnet connected to a router. + + The baremetal driver is a special case since all nodes are + on the same shared network. + + :param client: network client to create resources with. + :param tenant_id: id of tenant to create resources in. + :param dns_nameservers: list of dns servers to send to subnet. + :returns: network, subnet, router + """ + if CONF.baremetal.driver_enabled: + # NOTE(Shrews): This exception is for environments where tenant + # credential isolation is available, but network separation is + # not (the current baremetal case). Likely can be removed when + # test account mgmt is reworked: + # https://blueprints.launchpad.net/tempest/+spec/test-accounts + if not CONF.compute.fixed_network_name: + m = 'fixed_network_name must be specified in config' + raise exceptions.InvalidConfiguration(m) + network = self._get_network_by_name( + CONF.compute.fixed_network_name) + router = None + subnet = None + else: + network = self._create_network(client=client, tenant_id=tenant_id) + router = self._get_router(client=client, tenant_id=tenant_id) + + subnet_kwargs = dict(network=network, client=client) + # use explicit check because empty list is a valid option + if dns_nameservers is not None: + subnet_kwargs['dns_nameservers'] = dns_nameservers + subnet = self._create_subnet(**subnet_kwargs) + subnet.add_to_router(router.id) + return network, subnet, router + + def create_server(self, name=None, image=None, flavor=None, + wait_on_boot=True, wait_on_delete=True, + create_kwargs=None): + vnic_type = CONF.network.port_vnic_type + + # If vnic_type is configured create port for + # every network + if vnic_type: + ports = [] + networks = [] + create_port_body = {'binding:vnic_type': vnic_type, + 'namestart': 'port-smoke'} + if create_kwargs: + net_client = create_kwargs.get("network_client", + self.network_client) + + # Convert security group names to security group ids + # to pass to 
create_port + if create_kwargs.get('security_groups'): + security_groups = net_client.list_security_groups().get( + 'security_groups') + sec_dict = dict([(s['name'], s['id']) + for s in security_groups]) + + sec_groups_names = [s['name'] for s in create_kwargs[ + 'security_groups']] + security_groups_ids = [sec_dict[s] + for s in sec_groups_names] + + if security_groups_ids: + create_port_body[ + 'security_groups'] = security_groups_ids + networks = create_kwargs.get('networks') + else: + net_client = self.network_client + # If there are no networks passed to us we look up + # for the tenant's private networks and create a port + # if there is only one private network. The same behaviour + # as we would expect when passing the call to the clients + # with no networks + if not networks: + networks = net_client.list_networks(filters={ + 'router:external': False}) + self.assertEqual(1, len(networks), + "There is more than one" + " network for the tenant") + for net in networks: + net_id = net['uuid'] + port = self._create_port(network_id=net_id, + client=net_client, + **create_port_body) + ports.append({'port': port.id}) + if ports: + create_kwargs['networks'] = ports + + return super(NetworkScenarioTest, self).create_server( + name=name, image=image, flavor=flavor, + wait_on_boot=wait_on_boot, wait_on_delete=wait_on_delete, + create_kwargs=create_kwargs) + + +# power/provision states as of icehouse +class BaremetalPowerStates(object): + """Possible power states of an Ironic node.""" + POWER_ON = 'power on' + POWER_OFF = 'power off' + REBOOT = 'rebooting' + SUSPEND = 'suspended' + + +class BaremetalProvisionStates(object): + """Possible provision states of an Ironic node.""" + NOSTATE = None + INIT = 'initializing' + ACTIVE = 'active' + BUILDING = 'building' + DEPLOYWAIT = 'wait call-back' + DEPLOYING = 'deploying' + DEPLOYFAIL = 'deploy failed' + DEPLOYDONE = 'deploy complete' + DELETING = 'deleting' + DELETED = 'deleted' + ERROR = 'error' + + +class 
BaremetalScenarioTest(ScenarioTest): + + credentials = ['primary', 'admin'] + + @classmethod + def skip_checks(cls): + super(BaremetalScenarioTest, cls).skip_checks() + if (not CONF.service_available.ironic or + not CONF.baremetal.driver_enabled): + msg = 'Ironic not available or Ironic compute driver not enabled' + raise cls.skipException(msg) + + @classmethod + def setup_clients(cls): + super(BaremetalScenarioTest, cls).setup_clients() + + cls.baremetal_client = cls.admin_manager.baremetal_client + + @classmethod + def resource_setup(cls): + super(BaremetalScenarioTest, cls).resource_setup() + # allow any issues obtaining the node list to raise early + cls.baremetal_client.list_nodes() + + def _node_state_timeout(self, node_id, state_attr, + target_states, timeout=10, interval=1): + if not isinstance(target_states, list): + target_states = [target_states] + + def check_state(): + node = self.get_node(node_id=node_id) + if node.get(state_attr) in target_states: + return True + return False + + if not test.call_until_true( + check_state, timeout, interval): + msg = ("Timed out waiting for node %s to reach %s state(s) %s" % + (node_id, state_attr, target_states)) + raise exceptions.TimeoutException(msg) + + def wait_provisioning_state(self, node_id, state, timeout): + self._node_state_timeout( + node_id=node_id, state_attr='provision_state', + target_states=state, timeout=timeout) + + def wait_power_state(self, node_id, state): + self._node_state_timeout( + node_id=node_id, state_attr='power_state', + target_states=state, timeout=CONF.baremetal.power_timeout) + + def wait_node(self, instance_id): + """Waits for a node to be associated with instance_id.""" + + def _get_node(): + node = None + try: + node = self.get_node(instance_id=instance_id) + except lib_exc.NotFound: + pass + return node is not None + + if not test.call_until_true( + _get_node, CONF.baremetal.association_timeout, 1): + msg = ('Timed out waiting to get Ironic node by instance id %s' + % 
instance_id) + raise exceptions.TimeoutException(msg) + + def get_node(self, node_id=None, instance_id=None): + if node_id: + _, body = self.baremetal_client.show_node(node_id) + return body + elif instance_id: + _, body = self.baremetal_client.show_node_by_instance_uuid( + instance_id) + if body['nodes']: + return body['nodes'][0] + + def get_ports(self, node_uuid): + ports = [] + _, body = self.baremetal_client.list_node_ports(node_uuid) + for port in body['ports']: + _, p = self.baremetal_client.show_port(port['uuid']) + ports.append(p) + return ports + + def add_keypair(self): + self.keypair = self.create_keypair() + + def verify_connectivity(self, ip=None): + if ip: + dest = self.get_remote_client(ip) + else: + dest = self.get_remote_client(self.instance) + dest.validate_authentication() + + def boot_instance(self): + create_kwargs = { + 'key_name': self.keypair['name'] + } + self.instance = self.create_server( + wait_on_boot=False, create_kwargs=create_kwargs) + + self.wait_node(self.instance['id']) + self.node = self.get_node(instance_id=self.instance['id']) + + self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON) + + self.wait_provisioning_state( + self.node['uuid'], + [BaremetalProvisionStates.DEPLOYWAIT, + BaremetalProvisionStates.ACTIVE], + timeout=15) + + self.wait_provisioning_state(self.node['uuid'], + BaremetalProvisionStates.ACTIVE, + timeout=CONF.baremetal.active_timeout) + + self.servers_client.wait_for_server_status(self.instance['id'], + 'ACTIVE') + self.node = self.get_node(instance_id=self.instance['id']) + self.instance = self.servers_client.get_server(self.instance['id']) + + def terminate_instance(self): + self.servers_client.delete_server(self.instance['id']) + self.wait_power_state(self.node['uuid'], + BaremetalPowerStates.POWER_OFF) + self.wait_provisioning_state( + self.node['uuid'], + BaremetalProvisionStates.NOSTATE, + timeout=CONF.baremetal.unprovision_timeout) + + +class EncryptionScenarioTest(ScenarioTest): + """ 
+ Base class for encryption scenario tests + """ + + credentials = ['primary', 'admin'] + + @classmethod + def setup_clients(cls): + super(EncryptionScenarioTest, cls).setup_clients() + cls.admin_volume_types_client = cls.os_adm.volume_types_client + + def _wait_for_volume_status(self, status): + self.status_timeout( + self.volume_client.volumes, self.volume.id, status) + + def nova_boot(self): + self.keypair = self.create_keypair() + create_kwargs = {'key_name': self.keypair['name']} + self.server = self.create_server(image=self.image, + create_kwargs=create_kwargs) + + def create_volume_type(self, client=None, name=None): + if not client: + client = self.admin_volume_types_client + if not name: + name = 'generic' + randomized_name = data_utils.rand_name('scenario-type-' + name) + LOG.debug("Creating a volume type: %s", randomized_name) + body = client.create_volume_type( + randomized_name) + self.assertIn('id', body) + self.addCleanup(client.delete_volume_type, body['id']) + return body + + def create_encryption_type(self, client=None, type_id=None, provider=None, + key_size=None, cipher=None, + control_location=None): + if not client: + client = self.admin_volume_types_client + if not type_id: + volume_type = self.create_volume_type() + type_id = volume_type['id'] + LOG.debug("Creating an encryption type for volume type: %s", type_id) + client.create_encryption_type( + type_id, provider=provider, key_size=key_size, cipher=cipher, + control_location=control_location) + + +class SwiftScenarioTest(ScenarioTest): + """ + Provide harness to do Swift scenario tests. + + Subclasses implement the tests that use the methods provided by this + class. 
+ """ + + @classmethod + def skip_checks(cls): + super(SwiftScenarioTest, cls).skip_checks() + if not CONF.service_available.swift: + skip_msg = ("%s skipped as swift is not available" % + cls.__name__) + raise cls.skipException(skip_msg) + + @classmethod + def setup_credentials(cls): + cls.set_network_resources() + super(SwiftScenarioTest, cls).setup_credentials() + operator_role = CONF.object_storage.operator_role + cls.os_operator = cls.get_client_manager(roles=[operator_role]) + + @classmethod + def setup_clients(cls): + super(SwiftScenarioTest, cls).setup_clients() + # Clients for Swift + cls.account_client = cls.os_operator.account_client + cls.container_client = cls.os_operator.container_client + cls.object_client = cls.os_operator.object_client + + def get_swift_stat(self): + """get swift status for our user account.""" + self.account_client.list_account_containers() + LOG.debug('Swift status information obtained successfully') + + def create_container(self, container_name=None): + name = container_name or data_utils.rand_name( + 'swift-scenario-container') + self.container_client.create_container(name) + # look for the container to assure it is created + self.list_and_check_container_objects(name) + LOG.debug('Container %s created' % (name)) + self.addCleanup(self.delete_wrapper, + self.container_client.delete_container, + name) + return name + + def delete_container(self, container_name): + self.container_client.delete_container(container_name) + LOG.debug('Container %s deleted' % (container_name)) + + def upload_object_to_container(self, container_name, obj_name=None): + obj_name = obj_name or data_utils.rand_name('swift-scenario-object') + obj_data = data_utils.arbitrary_string() + self.object_client.create_object(container_name, obj_name, obj_data) + self.addCleanup(self.delete_wrapper, + self.object_client.delete_object, + container_name, + obj_name) + return obj_name, obj_data + + def delete_object(self, container_name, filename): + 
self.object_client.delete_object(container_name, filename) + self.list_and_check_container_objects(container_name, + not_present_obj=[filename]) + + def list_and_check_container_objects(self, container_name, + present_obj=None, + not_present_obj=None): + """ + List objects for a given container and assert which are present and + which are not. + """ + if present_obj is None: + present_obj = [] + if not_present_obj is None: + not_present_obj = [] + _, object_list = self.container_client.list_container_contents( + container_name) + if present_obj: + for obj in present_obj: + self.assertIn(obj, object_list) + if not_present_obj: + for obj in not_present_obj: + self.assertNotIn(obj, object_list) + + def change_container_acl(self, container_name, acl): + metadata_param = {'metadata_prefix': 'x-container-', + 'metadata': {'read': acl}} + self.container_client.update_container_metadata(container_name, + **metadata_param) + resp, _ = self.container_client.list_container_metadata(container_name) + self.assertEqual(resp['x-container-read'], acl) + + def download_and_verify(self, container_name, obj_name, expected_data): + _, obj = self.object_client.get_object(container_name, obj_name) + self.assertEqual(obj, expected_data) diff --git a/neutron_lbaas/tests/tempest/v2/scenario/test_load_balancer_basic.py b/neutron_lbaas/tests/tempest/v2/scenario/test_load_balancer_basic.py new file mode 100644 index 000000000..143f3d8f1 --- /dev/null +++ b/neutron_lbaas/tests/tempest/v2/scenario/test_load_balancer_basic.py @@ -0,0 +1,85 @@ +# Copyright 2015 Rackspace Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from neutron_lbaas.tests.tempest.lib import config +from neutron_lbaas.tests.tempest.lib import test +from neutron_lbaas.tests.tempest.v2.clients import listeners_client +from neutron_lbaas.tests.tempest.v2.clients import load_balancers_client +from neutron_lbaas.tests.tempest.v2.clients import members_client +from neutron_lbaas.tests.tempest.v2.clients import pools_client +from neutron_lbaas.tests.tempest.v2.scenario import base + +CONF = config.CONF + +LOG = logging.getLogger(__name__) + + +class TestLoadBalancerBasic(base.BaseTestCase): + + """ + This test checks basic load balancing. + The following is the scenario outline: + 1. Create an instance + 2. SSH to the instance and start two servers + 3. Create a load balancer with two members and with ROUND_ROBIN algorithm + associate the VIP with a floating ip + 4. Send NUM requests to the floating ip and check that they are shared + between the two servers. 
+ """ + def setUp(self): + super(TestLoadBalancerBasic, self).setUp() + self.server_ips = {} + self.server_fixed_ips = {} + self._create_security_group_for_test() + self._set_net_and_subnet() + + mgr = self.get_client_manager() + auth_provider = mgr.auth_provider + client_args = [auth_provider, 'network', 'regionOne'] + + self.load_balancers_client = ( + load_balancers_client.LoadBalancersClientJSON(*client_args)) + self.listeners_client = ( + listeners_client.ListenersClientJSON(*client_args)) + self.pools_client = pools_client.PoolsClientJSON(*client_args) + self.members_client = members_client.MembersClientJSON(*client_args) + + def tearDown(self): + super(TestLoadBalancerBasic, self).tearDown() + + @test.services('compute', 'network') + def test_load_balancer_basic(self): + self._create_servers() + self._start_servers() + self._create_load_balancer() + self._check_load_balancing() + + lbs = self.load_balancers_client.list_load_balancers() + for lb_entity in lbs: + lb_id = lb_entity['id'] + lb = self.load_balancers_client.get_load_balancer_status_tree( + lb_id).get('loadbalancer') + for listener in lb.get('listeners'): + for pool in listener.get('pools'): + self.delete_wrapper(self.pools_client.delete_pool, + pool.get('id')) + self._wait_for_load_balancer_status(lb_id) + self.delete_wrapper(self.listeners_client.delete_listener, + listener.get('id')) + self._wait_for_load_balancer_status(lb_id) + self.delete_wrapper( + self.load_balancers_client.delete_load_balancer, lb_id)