Update and add new system tests.

-added new tests:
 dvs_vcenter_security
 dvs_vcenter_tenants_isolation
 dvs_vcenter_same_ip
 dvs_vcenter_maintenance
 dvs_vcenter_bind_port
-updated the structure of the test suites according to the test plan.
-changed the default names of the external and internal admin networks

Change-Id: I3b89255618ba231f4cc907ef21b743ee9e2a16f6
This commit is contained in:
otsvigun 2015-12-10 16:29:34 +02:00
parent 133ab3cb4b
commit 0b746a7dfe
11 changed files with 1901 additions and 1158 deletions

@ -1 +1 @@
Subproject commit 202126f27c9ac907a6a26d0886397c208c042071
Subproject commit 5c12a9b3161820b06ee56fb0e04fd23168b46ff7

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,237 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis.asserts import assert_true
from devops.helpers.helpers import wait
from devops.error import TimeoutError
from fuelweb_test.settings import SERVTEST_TENANT
from fuelweb_test import logger
#defaults
external_net_name = 'admin_floating_net'
zone_image_maps = {'vcenter': 'TestVM-VMDK',
'nova': 'TestVM'}
def create_instances(os_conn=None, vm_count=None, nics=None,
                     security_group=None):
    """Create VMs on every available hypervisor.

    :param os_conn: type object, openstack
    :param vm_count: type integer, count of VMs to create per hypervisor
    :param nics: type dictionary, neutron networks
                 to assign to instance
    :param security_group: type dictionary, security group to assign to
                           instances
    """
    boot_timeout = 300
    # Get list of available images, flavors and hypervisors
    images_list = os_conn.nova.images.list()
    flavors_list = os_conn.nova.flavors.list()
    available_hosts = os_conn.nova.services.list(binary='nova-compute')
    for host in available_hosts:
        # Boot only in availability zones we have a test image for
        # (membership test instead of looping over every known zone).
        if host.zone not in zone_image_maps:
            continue
        image = [image for image
                 in images_list
                 if image.name == zone_image_maps[host.zone]][0]
        os_conn.nova.servers.create(
            flavor=flavors_list[0],
            name='test_{0}'.format(image.name),
            image=image, min_count=vm_count,
            availability_zone='{0}:{1}'.format(host.zone, host.host),
            nics=nics
        )
    # Verify that current state of each VM is Active
    srv_list = os_conn.get_servers()
    for srv in srv_list:
        assert_true(os_conn.get_instance_detail(srv).status != 'ERROR',
                    "Current state of Vm {0} is {1}".format(
                        srv.name, os_conn.get_instance_detail(srv).status))
        try:
            wait(
                lambda:
                os_conn.get_instance_detail(srv).status == "ACTIVE",
                timeout=boot_timeout)
        except TimeoutError:
            logger.error(
                "Timeout is reached.Current state of Vm {0} is {1}".format(
                    srv.name, os_conn.get_instance_detail(srv).status))
        # assign security group
        if security_group:
            srv.add_security_group(security_group)
def check_connection_vms(os_conn, srv_list, remote,
                         result_of_ping=0,
                         destination_ip=None):
    """Check network connectivity between instances and destination ip
       with ping

    :param os_conn: type object, openstack
    :param srv_list: type list, instances
    :param remote: SSHClient to primary controller
    :param result_of_ping: type int, expected ping exit code
        (0 means the target must be reachable)
    :param destination_ip: type list, remote destination ip to
                           check by ping
    """
    creds = ("cirros", "cubswin:)")
    icmp_count = 10

    for srv in srv_list:
        # Reach the guest through its floating address via the controller.
        addresses = srv.addresses[srv.addresses.keys()[0]]
        fip = [add['addr'] for add in addresses
               if add['OS-EXT-IPS:type'] == 'floating'][0]
        logger.info("Connect to VM {0}".format(fip))

        if not destination_ip:
            # No explicit targets: ping the fixed ip of every other server.
            for s in srv_list:
                if s != srv:
                    ip = s.networks[s.networks.keys()[0]][0]
                    ping_command = "ping -c {0} {1}".format(
                        icmp_count, ip)
                    ping_result = os_conn.execute_through_host(
                        remote, fip,
                        ping_command,
                        creds)
                    logger.info("Ping result: \n"
                                "{0}\n"
                                "{1}\n"
                                "exit_code={2}"
                                .format(ping_result['stdout'],
                                        ping_result['stderr'],
                                        ping_result['exit_code']))
        else:
            # Ping each requested target, skipping the server's own ip.
            for ip in destination_ip:
                if ip != srv.networks[srv.networks.keys()[0]][0]:
                    ping_command = "ping -c {0} {1}".format(
                        icmp_count, ip)
                    ping_result = os_conn.execute_through_host(
                        remote, fip,
                        ping_command, creds)
                    logger.info("Ping result: \n"
                                "{0}\n"
                                "{1}\n"
                                "exit_code={2}"
                                .format(ping_result['stdout'],
                                        ping_result['stderr'],
                                        ping_result['exit_code']))
        # NOTE(review): only the result of the last ping issued in the
        # loops above is asserted for this server -- confirm that checking
        # every ping individually is not required here.
        assert_true(
            result_of_ping == ping_result['exit_code'],
            "Ping VM{0} from Vm {1},"
            " not reached {2}".format(ip, fip, ping_result)
        )
def create_and_assign_floating_ip(os_conn, srv_list=None,
                                  ext_net=None, tenant_id=None):
    """Create a floating ip and assign it to each given instance.

    :param os_conn: type object, openstack
    :param srv_list: type list, objects of created instances
    :param ext_net: type object, neutron external network
    :param tenant_id: type string, tenant id
    """
    # Fall back to the default external network, the default tenant
    # and the full server list when the caller did not pass them.
    if not ext_net:
        ext_net = [net for net
                   in os_conn.neutron.list_networks()["networks"]
                   if net['name'] == external_net_name][0]
    if not tenant_id:
        tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id
    if not srv_list:
        srv_list = os_conn.get_servers()
    for srv in srv_list:
        fip = os_conn.neutron.create_floatingip(
            {'floatingip': {
                'floating_network_id': ext_net['id'],
                'tenant_id': tenant_id}})
        os_conn.nova.servers.add_floating_ip(
            srv, fip['floatingip']['floating_ip_address']
        )
def add_router(os_conn, router_name, ext_net_name=external_net_name,
               tenant_name=SERVTEST_TENANT):
    """Create a neutron router with an external gateway.

    :param os_conn: type object, openstack
    :param router_name: type string, name of the new router
    :param ext_net_name: type string, name of the external network
    :param tenant_name: type string, tenant that owns the router
    :return: dict describing the created router
    """
    networks = os_conn.neutron.list_networks()["networks"]
    matching = [net for net in networks if net['name'] == ext_net_name]
    ext_net = matching[0]

    gateway = {"network_id": ext_net["id"],
               "enable_snat": True
               }
    tenant_id = os_conn.get_tenant(tenant_name).id
    router_param = {
        'router': {
            'name': router_name,
            'external_gateway_info': gateway,
            'tenant_id': tenant_id,
        }
    }
    return os_conn.neutron.create_router(body=router_param)['router']
def add_subnet_to_router(os_conn, router_id, sub_id):
    """Attach the given subnet as an interface of the router."""
    payload = {'subnet_id': sub_id}
    os_conn.neutron.add_interface_router(router_id, payload)
def create_network(os_conn, name,
                   tenant_name=SERVTEST_TENANT):
    """Create a neutron network owned by the given tenant.

    :param os_conn: type object, openstack
    :param name: type string, name of the new network
    :param tenant_name: type string, tenant that owns the network
    :return: dict describing the created network
    """
    tenant_id = os_conn.get_tenant(tenant_name).id
    net_body = {"network": {"name": name,
                            "tenant_id": tenant_id
                            }
                }
    network = os_conn.neutron.create_network(net_body)['network']
    return network
def create_subnet(os_conn, network,
                  cidr, tenant_name=SERVTEST_TENANT):
    """Create an IPv4 subnet inside the given network.

    :param os_conn: type object, openstack
    :param network: type dict, neutron network that holds the subnet
    :param cidr: type string, subnet CIDR, e.g. '192.168.112.0/24'
    :param tenant_name: type string, tenant that owns the subnet
    :return: dict describing the created subnet
    """
    tenant_id = os_conn.get_tenant(tenant_name).id
    subnet_body = {"subnet": {"network_id": network['id'],
                              "ip_version": 4,
                              "cidr": cidr,
                              # Subnet name is derived from the LAST
                              # character of the network name
                              # (e.g. 'net_1' -> 'subnet_1').
                              "name": 'subnet_{}'.format(
                                  network['name'][-1]),
                              "tenant_id": tenant_id
                              }
                   }
    subnet = os_conn.neutron.create_subnet(subnet_body)['subnet']
    return subnet
def get_role(os_conn, role_name):
    """Look up a keystone role by its name.

    :param os_conn: type object, openstack
    :param role_name: type string, role name to search for
    :return: the matching role object, or None when no role matches
    """
    matching = [role for role in os_conn.keystone.roles.list()
                if role.name == role_name]
    if matching:
        return matching[0]
    return None
def add_role_to_user(os_conn, user_name, role_name, tenant_name):
    """Grant a keystone role to a user within a tenant.

    :param os_conn: type object, openstack
    :param user_name: type string, name of an existing user
    :param role_name: type string, name of an existing role
    :param tenant_name: type string, tenant the grant applies to
    """
    tenant_id = os_conn.get_tenant(tenant_name).id
    user_id = os_conn.get_user(user_name).id
    # NOTE(review): get_role() returns None for an unknown role name,
    # which would raise AttributeError on .id here -- assumes the role
    # always exists.
    role_id = get_role(os_conn, role_name).id
    os_conn.keystone.roles.add_user_role(user_id, role_id, tenant_id)

View File

@ -0,0 +1,53 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis.asserts import assert_true
from fuelweb_test.helpers import checkers
from fuelweb_test import logger
# constants
DVS_PLUGIN_PATH = os.environ.get('DVS_PLUGIN_PATH')
plugin_name = 'fuel-plugin-vmware-dvs'
msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
dvs_switch_name = ['dvSwitch']
def install_dvs_plugin(master_node):
    """Upload the DVS plugin tarball to the master node and install it.

    :param master_node: SSH remote to the Fuel master node
    """
    # copy plugins to the master node
    checkers.upload_tarball(
        master_node,
        DVS_PLUGIN_PATH, "/var")
    # install plugin
    checkers.install_plugin_check_code(
        master_node,
        plugin=os.path.basename(DVS_PLUGIN_PATH))
def enable_plugin(cluster_id, fuel_web_client):
    """Enable the installed DVS plugin for the given cluster.

    :param cluster_id: type int, id of the target cluster
    :param fuel_web_client: fuel web client object
    """
    # The plugin must already be installed on the master node.
    assert_true(
        fuel_web_client.check_plugin_exists(cluster_id, plugin_name),
        msg)
    plugin_data = {
        'metadata/enabled': True,
        '#1_vmware_dvs_net_maps/value': dvs_switch_name[0],
    }
    fuel_web_client.update_plugin_data(cluster_id, plugin_name, plugin_data)
    logger.info("cluster is {}".format(cluster_id))

View File

@ -42,8 +42,10 @@ class CloseSSHConnectionsPlugin(Plugin):
def import_tests():
    # Import the test modules for their side effect: proboscis registers
    # the @test-decorated groups at import time. The names themselves
    # are intentionally unused.
    from tests import test_plugin_vmware_dvs
    from tests import test_plugin_vmware_dvs_destructive
    from tests import test_plugin_vmware_dvs_maintenance
    from tests import test_plugin_vmware_dvs_smoke
    from tests import test_plugin_vmware_dvs_system
def run_tests():
from proboscis import TestProgram # noqa

View File

@ -0,0 +1,147 @@
adv_net_template:
  default:
    nic_mapping:
      default:
        if1: eth0  # admin
        if2: eth1  # public
        if3: eth4  # management
        if4: eth2  # private
        if5: eth3  # storage
    templates_for_node_role:
      controller:
        - public
        - private
        - storage
        - common
      # NOTE: duplicated 'compute-vmware' and 'ceph-osd' mappings removed;
      # repeating a key inside one YAML mapping is invalid and most
      # parsers silently keep only one of the entries.
      compute-vmware:
        - common
        - private
        - storage
      compute:
        - common
        - private
        - storage
      ceph-osd:
        - common
        - storage
      cinder-vmware:
        - common
        - storage
        # NOTE(review): no 'custom' scheme is defined under network_scheme
        # below -- confirm this template is provided elsewhere.
        - custom
    network_assignments:
      storage:
        ep: br-storage
      private:
        ep: br-prv
      public:
        ep: br-ex
      management:
        ep: br-mgmt
      fuelweb_admin:
        ep: br-fw-admin
    network_scheme:
      storage:
        transformations:
          - action: add-br
            name: br-storage
          - action: add-port
            bridge: br-storage
            name: <% if5 %>
        endpoints:
          - br-storage
        roles:
          cinder/iscsi: br-storage
          swift/replication: br-storage
          ceph/replication: br-storage
          storage: br-storage
      private:
        transformations:
          - action: add-br
            name: br-prv
            provider: ovs
          - action: add-br
            name: br-aux
          - action: add-patch
            bridges:
              - br-prv
              - br-aux
            provider: ovs
            mtu: 65000
          - action: add-port
            bridge: br-aux
            name: <% if4 %>
        endpoints:
          - br-prv
        roles:
          neutron/private: br-prv
      public:
        transformations:
          - action: add-br
            name: br-ex
          - action: add-br
            name: br-floating
            provider: ovs
          - action: add-patch
            bridges:
              - br-floating
              - br-ex
            provider: ovs
            mtu: 65000
          - action: add-port
            bridge: br-ex
            name: <% if2 %>
        endpoints:
          - br-ex
        roles:
          public/vip: br-ex
          neutron/floating: br-floating
          ceph/radosgw: br-ex
          ex: br-ex
      common:
        transformations:
          - action: add-br
            name: br-fw-admin
          - action: add-port
            bridge: br-fw-admin
            name: <% if1 %>
          - action: add-br
            name: br-mgmt
          - action: add-port
            bridge: br-mgmt
            name: <% if3 %>
        endpoints:
          - br-fw-admin
          - br-mgmt
        roles:
          admin/pxe: br-fw-admin
          fw-admin: br-fw-admin
          mongo/db: br-mgmt
          management: br-mgmt
          keystone/api: br-mgmt
          neutron/api: br-mgmt
          neutron/mesh: br-mgmt
          swift/api: br-mgmt
          sahara/api: br-mgmt
          ceilometer/api: br-mgmt
          cinder/api: br-mgmt
          glance/api: br-mgmt
          heat/api: br-mgmt
          nova/api: br-mgmt
          nova/migration: br-mgmt
          murano/api: br-mgmt
          horizon: br-mgmt
          # duplicate 'mgmt/api' entry removed (it was listed twice)
          mgmt/api: br-mgmt
          mgmt/memcache: br-mgmt
          mgmt/database: br-mgmt
          mgmt/messaging: br-mgmt
          mgmt/corosync: br-mgmt
          mgmt/vip: br-mgmt
          ceph/public: br-mgmt

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,418 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from proboscis import test
from proboscis.asserts import assert_true
from devops.helpers.helpers import wait
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test import logger
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
from fuelweb_test.settings import SERVTEST_USERNAME
from fuelweb_test.settings import SERVTEST_PASSWORD
from fuelweb_test.settings import SERVTEST_TENANT
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test.helpers import os_actions
from helpers import plugin
from helpers import openstack
@test(groups=["plugins", 'dvs_vcenter_plugin', 'dvs_vcenter_system'])
class TestDVSPlugin(TestBasic):
    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
          groups=["dvs_vcenter_add_delete_nodes", "dvs_vcenter_plugin"])
    @log_snapshot_after_test
    def dvs_vcenter_add_delete_nodes(self):
        """Deploy cluster with plugin and vmware datastore backend

        Scenario:
            1. Upload plugins to the master node.
            2. Install plugin.
            3. Create cluster with vcenter.
            4. Add 3 node with controller role.
            5. Add 2 node with cinder-vmdk role.
            6. Add 1 node with compute role.
            7. Remove node with cinder-vmdk role.
            8. Add node with cinder role.
            9. Redeploy cluster.
            10. Run OSTF.
            11. Remove node with compute role.
            12. Add node with cinder-vmdk role.
            13. Redeploy cluster.
            14. Run OSTF.

        Duration 3 hours

        """
        self.env.revert_snapshot("ready_with_9_slaves")

        plugin.install_dvs_plugin(self.env.d_env.get_admin_remote())

        # Configure cluster
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE
            }
        )
        plugin.enable_plugin(cluster_id, self.fuel_web)

        # Assign role to node
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'],
             'slave-02': ['controller'],
             'slave-03': ['controller'],
             'slave-04': ['cinder-vmware'],
             'slave-05': ['compute'],
             'slave-06': ['compute'],
             'slave-07': ['compute-vmware'], })

        # Configure VMWare vCenter settings
        target_node_1 = self.node_name('slave-07')
        self.fuel_web.vcenter_configure(
            cluster_id,
            target_node_1=target_node_1,
            multiclusters=True
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke'])

        # Remove node with cinder-vmdk role
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-04': ['cinder-vmware'], }, False, True)

        # Add 1 node with cinder role and redeploy cluster
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-08': ['cinder'],
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke'])

        # Remove node with compute role
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-05': ['compute'], }, False, True)

        # Add 1 node with cinder-vmdk role and redeploy cluster
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['cinder-vmware'],
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke'])
    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
          groups=["dvs_vcenter_add_delete_controller", "dvs_vcenter_plugin"])
    @log_snapshot_after_test
    def dvs_vcenter_add_delete_controller(self):
        """Deploy cluster with plugin, adding and deletion of a controller node.

        Scenario:
            1. Upload plugins to the master node.
            2. Install plugin.
            3. Create cluster with vcenter.
            4. Add 4 node with controller role.
            5. Add 1 node with cinder-vmdk role.
            6. Add 1 node with compute role.
            7. Deploy cluster.
            8. Run OSTF.
            9. Remove node with controller role.
            10. Redeploy cluster.
            11. Run OSTF.
            12. Add node with controller role.
            13. Redeploy cluster.
            14. Run OSTF.

        Duration 3.5 hours

        """
        self.env.revert_snapshot("ready_with_9_slaves")

        plugin.install_dvs_plugin(self.env.d_env.get_admin_remote())

        # Configure cluster
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE
            }
        )
        plugin.enable_plugin(cluster_id, self.fuel_web)

        # Assign role to node
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'],
             'slave-02': ['controller'],
             'slave-03': ['controller'],
             'slave-04': ['controller'],
             'slave-05': ['cinder-vmware'],
             'slave-06': ['compute'], })

        # Configure VMWare vCenter settings
        self.fuel_web.vcenter_configure(cluster_id)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke'])

        # Remove node with controller role
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'], }, False, True)

        # check_services=False: service health is verified by the
        # explicit OSTF run below
        self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke'])

        # Add node with controller role
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-07': ['controller'],
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke'])
    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["dvs_vcenter_destructive_setup", "dvs_vcenter_plugin"])
    @log_snapshot_after_test
    def dvs_vcenter_destructive_setup(self):
        """Deploy cluster with plugin and vmware datastore backend

        Scenario:
            1. Upload plugins to the master node.
            2. Install plugin.
            3. Create cluster with vcenter.
            4. Add 1 node with controller role.
            5. Add 1 node with compute role.
            6. Add 1 node with compute-vmware role.
            7. Deploy the cluster.
            8. Run OSTF.

        Duration 1.8 hours

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        plugin.install_dvs_plugin(self.env.d_env.get_admin_remote())

        # Configure cluster with 2 vcenter clusters and vcenter glance
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
                'images_vcenter': True
            }
        )
        plugin.enable_plugin(cluster_id, self.fuel_web)

        # Assign role to node
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'],
             'slave-02': ['compute-vmware'],
             'slave-03': ['compute']
             }
        )

        # Configure VMWare vCenter settings
        target_node_2 = self.node_name('slave-02')
        self.fuel_web.vcenter_configure(
            cluster_id,
            target_node_2=target_node_2,
            multiclusters=True,
            vc_glance=True
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke'])

        # Save the deployed state for the dependent destructive tests
        self.env.make_snapshot("dvs_vcenter_destructive_setup", is_make=True)
@test(depends_on=[dvs_vcenter_destructive_setup],
groups=["dvs_vcenter_uninstall", 'dvs_vcenter_system'])
@log_snapshot_after_test
def dvs_vcenter_uninstall(self):
"""Verify that it is not possibility to uninstall
of Fuel DVS plugin with deployed environment.
Scenario:
1. Revert snapshot to dvs_vcenter_bvt_2
2. Try to uninstall dvs plugin.
Duration 1.8 hours
"""
self.env.revert_snapshot("dvs_vcenter_destructive_setup")
# Try to uninstall dvs plugin
cmd = 'fuel plugins --remove {}==1.1.0'.format(plugin.plugin_name)
self.env.d_env.get_admin_remote().execute(cmd)['exit_code'] == 1
# Check that plugin is not removed
output = list(self.env.d_env.get_admin_remote().execute(
'fuel plugins list')['stdout'])
assert_true(
plugin.plugin_name in output[-1].split(' '),
"Plugin is removed {}".format(plugin.plugin_name)
)
@test(depends_on=[dvs_vcenter_destructive_setup],
groups=["dvs_vcenter_destructive_setup", 'dvs_vcenter_system'])
@log_snapshot_after_test
def dvs_vcenter_bind_port(self):
"""Check abilities to bind port on DVS to VM,
disable and enable this port.
Scenario:
1. Revert snapshot to dvs_vcenter_bvt_2
2. Create private networks net01 with sunet.
3. Launch instances VM_1 and VM_2 in the net01
with image TestVM and flavor m1.micro in nova az.
4. Launch instances VM_3 and VM_4 in the net01
with image TestVM-VMDK and flavor m1.micro in nova az.
4. Bind sub_net port of Vms
5. Check VMs are not available.
6. Enable sub_net port of all Vms.
7. Verify that VMs should communicate between each other.
Send icmp ping between VMs.
Duration 1,5 hours
"""
self.env.revert_snapshot("dvs_vcenter_destructive_setup")
cluster_id = self.fuel_web.get_last_created_cluster()
# Create new network
os_ip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(
os_ip, SERVTEST_USERNAME,
SERVTEST_PASSWORD,
SERVTEST_TENANT)
# create security group with rules for ssh and ping
security_group = {}
security_group[os_conn.get_tenant(SERVTEST_TENANT).id] =\
os_conn.create_sec_group_for_ssh()
security_group = security_group[
os_conn.get_tenant(SERVTEST_TENANT).id].id
# Launch instance VM_1 and VM_2
network = os_conn.nova.networks.find(label='net04')
openstack.create_instances(
os_conn=os_conn, vm_count=1,
nics=[{'net-id': network.id}], security_group=security_group
)
openstack.create_and_assign_floating_ip(os_conn=os_conn)
time.sleep(30) # need time to apply updates
# Bind sub_net ports of Vms
ports = os_conn.neutron.list_ports()['ports']
srv_list = os_conn.get_servers()
for srv in srv_list:
srv_addr = srv.networks[srv.networks.keys()[0]][0]
for port in ports:
port_addr = port['fixed_ips'][0]['ip_address']
if srv_addr == port_addr:
os_conn.neutron.update_port(
port['id'], {'port': {'admin_state_up': False}}
)
srv_list = os_conn.get_servers()
# Verify that not connection to VMs
primary_controller = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[0]
)
ssh_controller = self.fuel_web.get_ssh_for_node(
primary_controller.name
)
try:
openstack.check_connection_vms(
os_conn=os_conn, srv_list=srv_list, remote=ssh_controller)
except Exception as e:
logger.info(str(e))
# Enable sub_net ports of VMs
for srv in srv_list:
srv_addr = srv.networks[srv.networks.keys()[0]][0]
for port in ports:
port_addr = port['fixed_ips'][0]['ip_address']
if srv_addr == port_addr:
os_conn.neutron.update_port(
port['id'], {'port': {'admin_state_up': True}}
)
srv_list = os_conn.get_servers()
for srv in srv_list:
srv.reboot()
wait(
lambda:
os_conn.get_instance_detail(srv).status == "ACTIVE",
timeout=300)
time.sleep(60) # need time after reboot to get ip by instance
# Verify that VMs should communicate between each other.
# Send icmp ping between VMs
openstack.check_connection_vms(os_conn=os_conn, srv_list=srv_list,
remote=ssh_controller)

View File

@ -0,0 +1,90 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from helpers import plugin
@test(groups=["plugins"])
class TestDVSPlugin(TestBasic):

    def node_name(self, name_node):
        """Return the nailgun hostname of the named devops node."""
        # Replaces the former `node_name = lambda ...` class attribute
        # (PEP 8 E731: do not assign a lambda, use a def).
        return self.fuel_web.get_nailgun_node_by_name(name_node)['hostname']

    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
          groups=["dvs_vcenter_maintenance"])
    @log_snapshot_after_test
    def dvs_vcenter_maintenance(self):
        """Deploy cluster with plugin and vmware datastore backend

        Scenario:
            1. Upload plugins to the master node.
            2. Install plugin.
            3. Create cluster with vcenter.
            4. Add 3 node with controller+mongo+cinder-vmware role.
            5. Add 2 node with compute role.
            6. Add 1 node with compute-vmware role.
            7. Deploy the cluster.
            8. Run OSTF.

        Duration 1.8 hours

        """
        self.env.revert_snapshot("ready_with_9_slaves")

        plugin.install_dvs_plugin(self.env.d_env.get_admin_remote())

        # Configure cluster with 2 vcenter clusters and vcenter glance
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
                'images_vcenter': True
            }
        )
        plugin.enable_plugin(cluster_id, self.fuel_web)

        # Assign role to node
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller', 'mongo', 'cinder-vmware'],
             'slave-02': ['controller', 'mongo', 'cinder-vmware'],
             'slave-03': ['controller', 'mongo', 'cinder-vmware'],
             'slave-04': ['compute'],
             'slave-05': ['compute'],
             'slave-06': ['compute-vmware']}
        )

        # Configure VMWare vCenter settings
        target_node_2 = self.node_name('slave-06')
        self.fuel_web.vcenter_configure(
            cluster_id,
            target_node_2=target_node_2,
            multiclusters=True,
            vc_glance=True
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke', 'tests_platform'])

View File

@ -0,0 +1,171 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from helpers import plugin
@test(groups=["plugins", 'dvs_vcenter_plugin'])
class TestDVSPlugin(TestBasic):

    def node_name(self, name_node):
        """Return the nailgun hostname of the named devops node."""
        # Replaces the former `node_name = lambda ...` class attribute
        # (PEP 8 E731: do not assign a lambda, use a def).
        return self.fuel_web.get_nailgun_node_by_name(name_node)['hostname']

    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
          groups=["dvs_vcenter_smoke", "dvs_vcenter_plugin"])
    @log_snapshot_after_test
    def dvs_vcenter_smoke(self):
        """Check deployment with VMware DVS plugin and one controller.

        Scenario:
            1. Upload plugins to the master node.
            2. Install plugin.
            3. Create a new environment with following parameters:
               * Compute: KVM/QEMU with vCenter
               * Networking: Neutron with VLAN segmentation
               * Storage: default
               * Additional services: default
            4. Add 1 node with controller role.
            5. Configure interfaces on nodes.
            6. Configure network settings.
            7. Enable and configure DVS plugin.
            8. Enable VMWare vCenter/ESXi datastore for images (Glance).
            9. Configure VMware vCenter Settings.
               Add 1 vSphere cluster and configure Nova Compute instances
               on controllers.
            10. Deploy the cluster.
            11. Run OSTF.

        Duration 1.8 hours

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        plugin.install_dvs_plugin(self.env.d_env.get_admin_remote())

        # Configure cluster with the vcenter glance backend
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
                'images_vcenter': True
            }
        )
        plugin.enable_plugin(cluster_id, self.fuel_web)

        # Assign role to node
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller']}
        )

        # Configure VMWare vCenter settings
        self.fuel_web.vcenter_configure(cluster_id, vc_glance=True)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke'])

    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
          groups=["dvs_vcenter_bvt", "dvs_vcenter_plugin"])
    @log_snapshot_after_test
    def dvs_vcenter_bvt(self):
        """Deploy cluster with DVS plugin and ceph storage.

        Scenario:
            1. Upload plugins to the master node.
            2. Install plugin.
            3. Create a new environment with following parameters:
               * Compute: KVM/QEMU with vCenter
               * Networking: Neutron with VLAN segmentation
               * Storage: ceph
               * Additional services: default
            4. Add nodes with following roles:
               * Controller
               * Controller
               * Controller
               * Compute
               * CephOSD
               * CephOSD
               * CinderVMware
               * ComputeVMware
            5. Configure interfaces on nodes.
            6. Configure network settings.
            7. Enable and configure DVS plugin.
               Configure VMware vCenter Settings. Add 2 vSphere clusters
               and configure Nova Compute instances on controller
               and compute-vmware.
            8. Verify networks.
            9. Deploy the cluster.
            10. Run OSTF.

        Duration 1.8 hours

        """
        self.env.revert_snapshot("ready_with_9_slaves")

        plugin.install_dvs_plugin(self.env.d_env.get_admin_remote())

        # Configure cluster with 2 vcenter clusters and ceph storage
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
                'images_ceph': True,
                'volumes_ceph': True,
                'objects_ceph': True,
                'volumes_lvm': False
            }
        )
        plugin.enable_plugin(cluster_id, self.fuel_web)

        # Assign role to node
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'],
             'slave-02': ['controller'],
             'slave-03': ['controller'],
             'slave-04': ['compute'],
             'slave-05': ['compute-vmware'],
             'slave-06': ['cinder-vmware'],
             'slave-07': ['ceph-osd'],
             'slave-08': ['ceph-osd'],
             'slave-09': ['ceph-osd']}
        )
        # Configure VMWare vCenter settings
        target_node_2 = self.node_name('slave-05')
        self.fuel_web.vcenter_configure(
            cluster_id,
            target_node_2=target_node_2,
            multiclusters=True
        )
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke'])

View File

@ -0,0 +1,779 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from proboscis import test
from proboscis.asserts import assert_true
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test import logger
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
from fuelweb_test.settings import SERVTEST_USERNAME
from fuelweb_test.settings import SERVTEST_PASSWORD
from fuelweb_test.settings import SERVTEST_TENANT
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test.helpers import os_actions
from helpers import plugin
from helpers import openstack
@test(groups=["plugins", 'dvs_vcenter_system'])
class TestDVSPlugin(TestBasic):
# constants
node_name = lambda self, name_node: self.fuel_web. \
get_nailgun_node_by_name(name_node)['hostname']
net_data = [{'net_1': '192.168.112.0/24'},
{'net_2': '192.168.113.0/24'}]
# defaults
ext_net_name = 'admin_floating_net'
inter_net_name = 'admin_internal_net'
    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["dvs_vcenter_systest_setup", 'dvs_vcenter_system'])
    @log_snapshot_after_test
    def dvs_vcenter_systest_setup(self):
        """Deploy cluster with plugin and vmware datastore backend

        Scenario:
            1. Upload plugins to the master node
            2. Install plugin.
            3. Create cluster with vcenter.
            4. Add 1 node with controller role.
            5. Add 2 node with compute role.
            6. Add 1 node with compute-vmware role.
            7. Deploy the cluster.
            8. Run OSTF.
            9. Create snapshot.

        Duration 1.8 hours
        Snapshot dvs_vcenter_systest_setup

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        plugin.install_dvs_plugin(self.env.d_env.get_admin_remote())

        # Configure cluster with 2 vcenter clusters and vcenter glance
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
                'images_vcenter': True
            }
        )
        plugin.enable_plugin(cluster_id, self.fuel_web)

        # Assign role to node
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'],
             'slave-02': ['compute-vmware'],
             'slave-03': ['compute'],
             'slave-04': ['compute']
             }
        )

        # Configure VMWare vCenter settings; one vSphere cluster's Nova
        # Compute runs on the compute-vmware node
        target_node_2 = self.node_name('slave-02')
        self.fuel_web.vcenter_configure(
            cluster_id,
            target_node_2=target_node_2,
            multiclusters=True,
            vc_glance=True
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['smoke'])

        # Save the deployed state for the dependent system tests
        self.env.make_snapshot("dvs_vcenter_systest_setup", is_make=True)
@test(depends_on=[dvs_vcenter_systest_setup],
groups=["dvs_vcenter_networks", 'dvs_vcenter_system'])
@log_snapshot_after_test
def dvs_vcenter_networks(self):
"""Check abilities to create and terminate networks on DVS.
Scenario:
1. Revert snapshot to dvs_vcenter_systest_setup.
2. Add 2 private networks net_1 and net_2.
3. Check that networks are created.
4. Delete net_1.
5. Check that net_1 is deleted.
6. Add net_1 again.
Duration 15 min
"""
self.env.revert_snapshot("dvs_vcenter_systest_setup")
cluster_id = self.fuel_web.get_last_created_cluster()
# Create new network
os_ip = self.fuel_web.get_public_vip(cluster_id)
os_conn = (
os_ip, SERVTEST_USERNAME,
SERVTEST_PASSWORD,
SERVTEST_TENANT)
subnets = []
networks = []
for net in self.net_data:
logger.info('Create network {}'.format(net.keys()[0]))
network = openstack.create_network(
os_conn,
net.keys()[0], tenant_name=SERVTEST_TENANT
)
logger.info('Create subnet {}'.format(net.keys()[0]))
subnet = openstack.create_subnet(
os_conn,
network,
net[net.keys()[0]], tenant_name=SERVTEST_TENANT
)
subnets.append(subnet)
networks.append(network)
# Check that networks are created.
for network in networks:
assert_true(
os_conn.get_network(network['name'])['id'] == network['id']
)
# Delete net_1.
logger.info('Delete network net_1')
os_conn.neutron.delete_subnet(subnets[0]['id'])
os_conn.neutron.delete_network(networks[0]['id'])
# Check that net_1 is deleted.
assert_true(
os_conn.get_network(networks[0]) is None
)
logger.info('Networks net_1 is removed.')
logger.info('Created net_1 again.')
network = openstack.create_network(
os_conn,
self.net_data[0].keys()[0])
subnet = openstack.create_subnet(
os_conn,
network,
self.net_data[0][self.net_data[0].keys()[0]],
tenant_name=SERVTEST_TENANT
)
assert_true(
os_conn.get_network(network['name'])['id'] == network['id']
)
logger.info('Networks net_1 and net_2 are present.')
    @test(depends_on=[dvs_vcenter_systest_setup],
          groups=["dvs_vcenter_ping_public", 'dvs_vcenter_system'])
    @log_snapshot_after_test
    def dvs_vcenter_ping_public(self):
        """Check connectivity Vms to public network with floating ip.

        Scenario:
            1. Revert snapshot to dvs_vcenter_systest_setup.
            2. Create private networks net01 with sunet.
            3. Add one subnet (net01_subnet01: 192.168.101.0/24
            4. Create Router_01, set gateway and add interface
               to external network.
            5. Launch instances VM_1 and VM_2 in the net01
               with image TestVM and flavor m1.micro in nova az.
            6. Launch instances VM_3 and VM_4 in the net01
               with image TestVM-VMDK and flavor m1.micro in vcenter az.
            7. Send ping from instances to 8.8.8.8
               or other outside ip.

        Duration 15 min
        """
        self.env.revert_snapshot("dvs_vcenter_systest_setup")
        cluster_id = self.fuel_web.get_last_created_cluster()
        # Connect to the OpenStack API of the deployed cluster.
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(
            os_ip, SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)
        # Create non default network with subnet.
        logger.info('Create network {}'.format(self.net_data[0].keys()[0]))
        network = openstack.create_network(
            os_conn,
            self.net_data[0].keys()[0], tenant_name=SERVTEST_TENANT
        )
        logger.info('Create subnet {}'.format(self.net_data[0].keys()[0]))
        subnet = openstack.create_subnet(
            os_conn,
            network,
            self.net_data[0][self.net_data[0].keys()[0]],
            tenant_name=SERVTEST_TENANT
        )
        # Check that network are created.
        assert_true(
            os_conn.get_network(network['name'])['id'] == network['id']
        )
        # create security group with rules for ssh and ping
        # (the dict is keyed by tenant id, then collapsed to the bare
        # security-group id which create_instances expects)
        security_group = {}
        security_group[os_conn.get_tenant(SERVTEST_TENANT).id] =\
            os_conn.create_sec_group_for_ssh()
        security_group = security_group[
            os_conn.get_tenant(SERVTEST_TENANT).id].id
        # Launch instance VM_1, VM_2 in the tenant network net_01
        # with image TestVMDK and flavor m1.micro in the nova az.
        # Launch instances VM_3 and VM_4 in the net01
        # with image TestVM-VMDK and flavor m1.micro in vcenter az.
        openstack.create_instances(
            os_conn=os_conn, vm_count=1,
            nics=[{'net-id': network['id']}], security_group=security_group
        )
        # Add net_1 to default router (the router attached to the
        # external/floating network).
        router = os_conn.get_router(os_conn.get_network(self.ext_net_name))
        openstack.add_subnet_to_router(
            os_conn,
            router['id'], subnet['id'])
        openstack.create_and_assign_floating_ip(os_conn=os_conn)
        # Send ping from instances VM_1 and VM_2 to 8.8.8.8
        # or other outside ip; the check is executed from the primary
        # controller over ssh.
        srv_list = os_conn.get_servers()
        primary_controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0]
        )
        ssh_controller = self.fuel_web.get_ssh_for_node(
            primary_controller.name)
        openstack.check_connection_vms(
            os_conn=os_conn, srv_list=srv_list, remote=ssh_controller,
            destination_ip=['8.8.8.8']
        )
@test(depends_on=[dvs_vcenter_systest_setup],
groups=["dvs_vcenter_5_instances", 'dvs_vcenter_system'])
@log_snapshot_after_test
def dvs_vcenter_5_instances(self):
"""Check creation instance in the one group simultaneously
Scenario:
1. Revert snapshot to dvs_vcenter_systest_setup.
2. Create 5 instances of vcenter and 5 of nova simultaneously.
Duration 15 min
"""
self.env.revert_snapshot("dvs_vcenter_systest_setup")
cluster_id = self.fuel_web.get_last_created_cluster()
# Create 5 instances of vcenter and 5 of nova simultaneously.
os_ip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(
os_ip, SERVTEST_USERNAME,
SERVTEST_PASSWORD,
SERVTEST_TENANT)
network = os_conn.nova.networks.find(label=self.inter_net_name)
openstack.create_instances(
os_conn=os_conn, vm_count=5,
nics=[{'net-id': network.id}])
    @test(depends_on=[dvs_vcenter_systest_setup],
          groups=["dvs_vcenter_security", 'dvs_vcenter_system'])
    @log_snapshot_after_test
    def dvs_vcenter_security(self):
        """Check abilities to create and delete security group.

        Scenario:
            1. Revert snapshot to dvs_vcenter_systest_setup.
            2. Create non default network with subnet net01.
            3. Launch 2 instances of nova az
               in the tenant network net_01
            4. Launch 2 instances of vcenter az
               in the tenant net04.
            5. Create security groups SG_1 to allow ICMP traffic.
            6. Add Ingress rule for ICMP protocol to SG_1
            7. Create security groups SG_2 to allow TCP traffic 22 port.
            8. Add Ingress rule for TCP protocol to SG_2
            9. Remove default security group and attach SG_1 and SG2 to VMs
            10. Check ssh between VMs
            11. Check ping between VMs
            12. Delete all rules from SG_1 and SG_2
            13. Check ssh are not available to VMs
                and vice verse
            14. Add Ingress rule for TCP protocol to SG_2
            15. Add Ingress rule for ICMP protocol to SG_1
            16. Check ping between VMs and vice verse
            17. Check SSH between VMs
            18. Delete security groups.
            19. Attach Vms to default security group.
            20. Check ssh are not available to VMs.

        Duration 30 min
        """
        self.env.revert_snapshot("dvs_vcenter_systest_setup")
        cluster_id = self.fuel_web.get_last_created_cluster()
        # Connect to cluster
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(
            os_ip, SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)
        # Create non default network with subnet.
        logger.info('Create network {}'.format(self.net_data[0].keys()[0]))
        network = openstack.create_network(
            os_conn,
            self.net_data[0].keys()[0],
            tenant_name=SERVTEST_TENANT
        )
        logger.info('Create subnet {}'.format(self.net_data[0].keys()[0]))
        subnet = openstack.create_subnet(
            os_conn,
            network,
            self.net_data[0][self.net_data[0].keys()[0]],
            tenant_name=SERVTEST_TENANT
        )
        # Check that network are created.
        assert_true(
            os_conn.get_network(network['name'])['id'] == network['id']
        )
        # Add net_1 to default router
        router = os_conn.get_router(os_conn.get_network(self.ext_net_name))
        openstack.add_subnet_to_router(
            os_conn,
            router['id'], subnet['id'])
        # Launch instance 2 VMs of vcenter and 2 VMs of nova
        # in the tenant network net_01
        openstack.create_instances(
            os_conn=os_conn, vm_count=1,
            nics=[{'net-id': network['id']}]
        )
        # Launch instance 2 VMs of vcenter and 2 VMs of nova
        # in the tenant network net04
        network = os_conn.nova.networks.find(label=self.inter_net_name)
        openstack.create_instances(
            os_conn=os_conn, vm_count=1,
            nics=[{'net-id': network.id}])
        openstack.create_and_assign_floating_ip(os_conn=os_conn)
        # Create security groups SG_1 to allow ICMP traffic.
        # Add Ingress rule for ICMP protocol to SG_1
        # Create security groups SG_2 to allow TCP traffic 22 port.
        # Add Ingress rule for TCP protocol to SG_2
        sec_name = ['SG1', 'SG2']
        sg1 = os_conn.nova.security_groups.create(
            sec_name[0], "descr")
        sg2 = os_conn.nova.security_groups.create(
            sec_name[1], "descr")
        rulesets = [
            {
                # ssh
                'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'cidr': '0.0.0.0/0',
            },
            {
                # ping
                'ip_protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '0.0.0.0/0',
            }
        ]
        # NOTE(review): the scenario describes SG_1 as the ICMP group and
        # SG_2 as the TCP group, but the ssh ruleset goes to sg1 and the
        # icmp one to sg2 here. Both groups are attached to every VM, so
        # the traffic result is the same -- confirm the naming intent.
        tcp = os_conn.nova.security_group_rules.create(
            sg1.id, **rulesets[0]
        )
        icmp = os_conn.nova.security_group_rules.create(
            sg2.id, **rulesets[1]
        )
        # Remove default security group and attach SG_1 and SG2 to VMs
        srv_list = os_conn.get_servers()
        for srv in srv_list:
            srv.remove_security_group(srv.security_groups[0]['name'])
            srv.add_security_group(sg1.id)
            srv.add_security_group(sg2.id)
        time.sleep(20)  # need wait to update rules on dvs
        # SSh to VMs
        # Check ping between VMs
        primary_controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0]
        )
        ssh_controller = self.fuel_web.get_ssh_for_node(
            primary_controller.name)
        openstack.check_connection_vms(os_conn=os_conn, srv_list=srv_list,
                                       remote=ssh_controller)
        # Delete all rules from SG_1 and SG_2
        os_conn.nova.security_group_rules.delete(tcp.id)
        os_conn.nova.security_group_rules.delete(icmp.id)
        # Check ssh are not available between VMs
        # and vice verse
        # NOTE(review): the expected connectivity failure is only logged
        # here; if connectivity unexpectedly still works this step passes
        # silently. Consider an explicit negative check instead -- confirm.
        try:
            openstack.check_connection_vms(
                os_conn=os_conn, srv_list=srv_list, remote=ssh_controller)
        except Exception as e:
            logger.info('{}'.format(e))
        tcp = os_conn.nova.security_group_rules.create(
            sg1.id, **rulesets[0]
        )
        time.sleep(20)  # need wait to update rules on dvs
        # Check ping are not available between VMs
        # (only the ssh rule is restored, so icmp should still fail)
        srv_list = os_conn.get_servers()
        openstack.check_connection_vms(os_conn=os_conn, srv_list=srv_list,
                                       remote=ssh_controller, result_of_ping=1)
        icmp = os_conn.nova.security_group_rules.create(
            sg2.id, **rulesets[1]
        )
        time.sleep(20)  # need wait to update rules on dvs
        # Check ping is available between VMs again
        # (both rules are back in place)
        openstack.check_connection_vms(
            os_conn=os_conn, srv_list=srv_list, remote=ssh_controller)
    @test(depends_on=[dvs_vcenter_systest_setup],
          groups=["dvs_vcenter_tenants_isolation", 'dvs_vcenter_system'])
    @log_snapshot_after_test
    def dvs_vcenter_tenants_isolation(self):
        """Verify that VMs on different tenants should not communicate
        between each other. Send icmp ping from VMs
        of admin tenant to VMs of test_tenant and vice versa.

        Scenario:
            1. Revert snapshot to dvs_vcenter_systest_setup.
            2. Create non-admin tenant.
            3. Create network net01 with subnet in non-admin tenant
            4. Create Router_01, set gateway and add interface
               to external network.
            5. Launch 2 instances in the net01(non-admin network)
               in nova and vcenter az.
            6. Launch 2 instances in the default internal
               admin network in nova and vcenter az.
            7. Verify that VMs on different tenants should not communicate
               between each other via no floating ip. Send icmp ping from VM_3,
               VM_4 of admin tenant to VM_3 VM_4 of test_tenant and vice versa.

        Duration 30 min
        """
        self.env.revert_snapshot("dvs_vcenter_systest_setup")
        cluster_id = self.fuel_web.get_last_created_cluster()
        # Connect to the OpenStack API as the default admin tenant.
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        admin = os_actions.OpenStackActions(
            os_ip, SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)
        # Create non-admin tenant.
        admin.create_user_and_tenant('test', 'test', 'test')
        openstack.add_role_to_user(admin, 'test', 'admin', 'test')
        # Second API client scoped to the new 'test' tenant.
        test = os_actions.OpenStackActions(
            os_ip, 'test', 'test', 'test')
        # Create non default network with subnet in test tenant.
        logger.info('Create network {}'.format(self.net_data[0].keys()[0]))
        network = openstack.create_network(
            test,
            self.net_data[0].keys()[0], tenant_name='test'
        )
        logger.info('Create subnet {}'.format(self.net_data[0].keys()[0]))
        subnet = openstack.create_subnet(
            test,
            network,
            self.net_data[0][self.net_data[0].keys()[0]],
            tenant_name='test'
        )
        # create security group with rules for ssh and ping
        security_group = {}
        security_group[test.get_tenant('test').id] =\
            test.create_sec_group_for_ssh()
        security_group = security_group[
            test.get_tenant('test').id].id
        # Launch 2 instances in the test tenant network net_01
        openstack.create_instances(
            os_conn=test, vm_count=1,
            nics=[{'net-id': network['id']}], security_group=security_group
        )
        # Create Router_01, set gateway and add interface
        # to external network.
        # NOTE(review): no ext_net_name is passed here (unlike the
        # same-ip test); presumably add_router falls back to the default
        # external network -- confirm against the helper.
        router_1 = openstack.add_router(
            test,
            'router_1'
        )
        # Add net_1 to router_1
        openstack.add_subnet_to_router(
            test,
            router_1['id'], subnet['id'])
        # create security group with rules for ssh and ping
        security_group = {}
        security_group[admin.get_tenant(SERVTEST_TENANT).id] =\
            admin.create_sec_group_for_ssh()
        security_group = security_group[
            admin.get_tenant(SERVTEST_TENANT).id].id
        # Launch 2 instances in the admin tenant net04
        network = admin.nova.networks.find(label=self.inter_net_name)
        openstack.create_instances(
            os_conn=admin, vm_count=1,
            nics=[{'net-id': network.id}], security_group=security_group)
        # Send ping from instances VM_1 and VM_2 to VM_3 and VM_4
        # via no floating ip
        srv_1 = admin.get_servers()
        srv_2 = test.get_servers()
        openstack.create_and_assign_floating_ip(os_conn=admin, srv_list=srv_1)
        openstack.create_and_assign_floating_ip(
            os_conn=test,
            srv_list=srv_2,
            ext_net=None,
            tenant_id=test.get_tenant('test').id)
        # Re-read the server lists so they carry the freshly attached
        # floating addresses.
        srv_1 = admin.get_servers()
        srv_2 = test.get_servers()
        # Collect the fixed (first-listed) addresses of the test-tenant
        # VMs as ping targets.
        ips = []
        for srv in srv_2:
            ip = srv.networks[srv.networks.keys()[0]][0]
            ips.append(ip)
        logger.info(ips)
        logger.info(srv_1)
        primary_controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0]
        )
        ssh_controller = self.fuel_web.get_ssh_for_node(
            primary_controller.name)
        # result_of_ping=1 expects the pings to FAIL: tenants must stay
        # isolated on their fixed addresses.
        openstack.check_connection_vms(
            os_conn=admin, srv_list=srv_1,
            result_of_ping=1,
            remote=ssh_controller, destination_ip=ips
        )
    @test(depends_on=[dvs_vcenter_systest_setup],
          groups=["dvs_vcenter_same_ip", 'dvs_vcenter_system'])
    @log_snapshot_after_test
    def dvs_vcenter_same_ip(self):
        """Check connectivity between VMs with same ip in different tenants.

        Scenario:
            1. Revert snapshot to dvs_vcenter_systest_setup.
            2. Create non-admin tenant.
            3. Create private network net01 with sunet in non-admin tenant.
            4. Create Router_01, set gateway and add interface
               to external network.
            5. Create private network net01 with sunet in default admin tenant
            6. Create Router_01, set gateway and add interface
               to external network.
            7. Launch instances VM_1 and VM_2 in the net01(non-admin tenant)
               with image TestVM and flavor m1.micro in nova az.
            8. Launch instances VM_3 and VM_4 in the net01(non-admin tenant)
               with image TestVM-VMDK and flavor m1.micro in vcenter az.
            9. Launch instances VM_5 and VM_6
               in the net01(default admin tenant)
               with image TestVM and flavor m1.micro in nova az.
            10. Launch instances VM_7 and VM_8
                in the net01(default admin tenant)
                with image TestVM-VMDK and flavor m1.micro in vcenter az.
            11. Verify that VM_1, VM_2, VM_3 and VM_4 should communicate
                between each other via no floating ip.
            12. Verify that VM_5, VM_6, VM_7 and VM_8 should communicate
                between each other via no floating ip.

        Duration 30 min
        """
        self.env.revert_snapshot("dvs_vcenter_systest_setup")
        cluster_id = self.fuel_web.get_last_created_cluster()
        # Connect to the OpenStack API as the default admin tenant.
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        admin = os_actions.OpenStackActions(
            os_ip, SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)
        # Create non-admin tenant.
        admin.create_user_and_tenant('test', 'test', 'test')
        openstack.add_role_to_user(admin, 'test', 'admin', 'test')
        # Second API client scoped to the new 'test' tenant.
        test = os_actions.OpenStackActions(
            os_ip, 'test', 'test', 'test')
        # Create non default network with subnet in test tenant.
        # Both tenants get a network with the SAME name and CIDR, so the
        # instances end up with overlapping fixed addresses.
        logger.info('Create network {}'.format(self.net_data[0].keys()[0]))
        network = openstack.create_network(
            test,
            self.net_data[0].keys()[0], tenant_name='test'
        )
        logger.info('Create subnet {}'.format(self.net_data[0].keys()[0]))
        subnet = openstack.create_subnet(
            test,
            network,
            self.net_data[0][self.net_data[0].keys()[0]],
            tenant_name='test'
        )
        # create security group with rules for ssh and ping
        security_group = {}
        security_group[test.get_tenant('test').id] =\
            test.create_sec_group_for_ssh()
        security_group = security_group[
            test.get_tenant('test').id].id
        # Launch instances VM_1 and VM_2 in the net01(non-admin tenant)
        # with image TestVM and flavor m1.micro in nova az.
        # Launch instances VM_3 and VM_4 in the net01(non-admin tenant)
        # with image TestVM-VMDK and flavor m1.micro in vcenter az.
        openstack.create_instances(
            os_conn=test, vm_count=1,
            nics=[{'net-id': network['id']}], security_group=security_group
        )
        # Create Router_01, set gateway and add interface
        # to external network.
        router_1 = openstack.add_router(
            test,
            'router_1',
            ext_net_name=self.ext_net_name, tenant_name='test'
        )
        # Add net_1 to router_1
        openstack.add_subnet_to_router(
            test,
            router_1['id'], subnet['id'])
        srv_1 = test.get_servers()
        openstack.create_and_assign_floating_ip(
            os_conn=test,
            srv_list=srv_1,
            ext_net=None,
            tenant_id=test.get_tenant('test').id)
        # Refresh so the list reflects the attached floating addresses.
        srv_1 = test.get_servers()
        # create security group with rules for ssh and ping
        security_group = {}
        security_group[admin.get_tenant(SERVTEST_TENANT).id] =\
            admin.create_sec_group_for_ssh()
        security_group = security_group[
            admin.get_tenant(SERVTEST_TENANT).id].id
        # Create non default network with subnet in admin tenant.
        logger.info('Create network {}'.format(self.net_data[0].keys()[0]))
        network = openstack.create_network(
            admin,
            self.net_data[0].keys()[0])
        logger.info('Create subnet {}'.format(self.net_data[0].keys()[0]))
        subnet = openstack.create_subnet(
            admin,
            network,
            self.net_data[0][self.net_data[0].keys()[0]])
        # Launch instances VM_5 and VM_6
        # in the net01(default admin tenant)
        # with image TestVM and flavor m1.micro in nova az.
        # Launch instances VM_7 and VM_8
        # in the net01(default admin tenant)
        # with image TestVM-VMDK and flavor m1.micro in vcenter az.
        openstack.create_instances(
            os_conn=admin, vm_count=1,
            nics=[{'net-id': network['id']}], security_group=security_group)
        # Create Router_01, set gateway and add interface
        # to external network.
        router_1 = openstack.add_router(
            admin,
            'router_1')
        # Add net_1 to router_1
        openstack.add_subnet_to_router(
            admin,
            router_1['id'], subnet['id'])
        # Send ping between instances
        # via no floating ip
        srv_2 = admin.get_servers()
        openstack.create_and_assign_floating_ip(
            os_conn=admin,
            srv_list=srv_2)
        srv_2 = admin.get_servers()
        # Verify that VM_1, VM_2, VM_3 and VM_4 should communicate
        # between each other via fixed ip.
        primary_controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0]
        )
        ssh_controller = self.fuel_web.get_ssh_for_node(
            primary_controller.name)
        openstack.check_connection_vms(
            os_conn=test, srv_list=srv_1, remote=ssh_controller)
        # Verify that VM_5, VM_6, VM_7 and VM_8 should communicate
        # between each other via fixed ip.
        openstack.check_connection_vms(os_conn=admin, srv_list=srv_2,
                                       remote=ssh_controller)