Smoke and BVT tests

* Add smoke test
* Add BVT test
* Fix shellcheck and flake8 errors
* Add a script that prepares interfaces for passing STT traffic

Prerequisites:
  * Interface number 4 in order on the OpenStack nodes (enp0s6) was free
  * Interface number 4 (enp0s6) has been selected for STT traffic in the plugin

Change-Id: Icf9df8c10216990871a51bfcd18ea7c8e316435f
Committed by: Andrey Setyaev
Parent: 9be55a15fb
Commit: f9ceda4701
.gitmodules (vendored, new file, +4)
@@ -0,0 +1,4 @@
[submodule "plugin_test/fuel-qa"]
    path = plugin_test/fuel-qa
    url = https://github.com/openstack/fuel-qa
    branch = stable/mitaka
plugin_test/__init__.py (new file, +14)
@@ -0,0 +1,14 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
plugin_test/fuel-qa (submodule, +1)
Submodule plugin_test/fuel-qa added at 15681971a6
plugin_test/helpers/__init__.py (new file, +14)
@@ -0,0 +1,14 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
plugin_test/helpers/openstack.py (new file, +282)
@@ -0,0 +1,282 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""

from keystoneclient.exceptions import Conflict
from keystoneclient.exceptions import NotFound

from fuelweb_test import logger
from fuelweb_test import settings as fw_settings

from fuelweb_test.helpers.common import Common

from helpers.tools import find_first
from helpers.tools import ShowPos


def get_openstack_list_paginator(page_size=10):
    """Retrieve information from openstack via pagination.

    NOTE: This decorator is not applicable for:
      * cinder.volume_snapshots.list
      * glance.images.list
      * keystone.users.list
      * neutron.list_networks
      * neutron.list_subnets
      * neutron.list_routers
      * neutron.list_ports
    because 'limit' is not working or not implemented in their API methods.
    """
    assert callable(page_size) is False, 'This is not a decorator'
    assert type(page_size) is int, 'page_size is not Int'

    def paginator(fn):
        def under_page(*args, **kwargs):
            # Process the listing as a regular call because limit or
            # marker parameters were passed
            limit = kwargs.pop('limit', None)
            marker = kwargs.pop('marker', None)
            if limit or marker:
                res = fn(*args, limit=limit, marker=marker, **kwargs)
                for item in res:
                    yield item
                return
            # Process the listing with pagination
            last_item_id = None
            while True:
                items = fn(*args, limit=page_size,
                           marker=last_item_id, **kwargs)
                if not items:
                    break
                for item in items:
                    if type(item) is dict:
                        last_item_id = item['id']
                    else:
                        last_item_id = item.id
                    yield item

        def paged_requester(*args, **kwargs):
            return [item for item in under_page(*args, **kwargs)]

        # return under_page
        return paged_requester
    return paginator


class HopenStack(ShowPos):
    """HOpenStack - Helpers for OpenStack."""

    def __init__(self, nsxv_ip, user=None, password=None, tenant=None):
        """Init Common.

        :param nsxv_ip: controller ip
        """
        user = user or fw_settings.SERVTEST_USERNAME
        password = password or fw_settings.SERVTEST_PASSWORD
        tenant = tenant or fw_settings.SERVTEST_TENANT
        self._common = Common(controller_ip=nsxv_ip,
                              user=user,
                              password=password,
                              tenant=tenant)

    def create_network(self, name):
        """Create network.

        :param name: name of network
        :return: dict with network info
        """
        request_body = {'network': {"name": name}}
        network = self._common.neutron.create_network(body=request_body)

        network_id = network['network']['id']
        logger.debug("Created network id '{0}'".format(network_id))

        return network['network']

    def create_subnetwork(self, network, cidr):
        """Create a subnet.

        :param network: dictionary
        :param cidr: string CIDR
        :return: dict with subnet info
        """
        subnet_params = {
            "subnet": {"network_id": network['id'],
                       "ip_version": 4,
                       "cidr": cidr,
                       "name": 'subnet_{}'.format(
                           network['name']),
                       }
        }
        subnet = self._common.neutron.create_subnet(subnet_params)['subnet']
        logger.debug("Created sub network id '{0}'".format(subnet['id']))

        return subnet

    def aggregate_create(self, name, availability_zone):
        """Create an aggregate.

        :param name: aggregate name
        :param availability_zone: availability zone name
        :return: answer on create aggregation request
        """
        return self._common.nova.aggregates.create(name, availability_zone)

    def aggregate_host_remove(self, aggregate, hostname):
        """Remove host from aggregate.

        :param aggregate: aggregate ID
        :param hostname: host name
        :return: answer on remove_host request
        """
        return self._common.nova.aggregates.remove_host(aggregate, hostname)

    def aggregate_host_add(self, aggregate, hostname):
        """Add host to aggregate.

        :param aggregate: destination aggregate ID
        :param hostname: host name
        :return: answer on add_host request
        """
        return self._common.nova.aggregates.add_host(aggregate, hostname)

    def aggregate_list(self):
        """Retrieve aggregates list.

        :return: Aggregate objects list
        """
        return self._common.nova.aggregates.list()

    def hosts_list(self, zone=None):
        """Retrieve hosts list.

        :param zone: availability zone name, optional parameter
        :return: Host objects list
        """
        return self._common.nova.hosts.list(zone=zone)

    def hosts_change_aggregate(self, agg_src, agg_dst, hostname):
        """Move host from one aggregate to another.

        :param agg_src: Source aggregate ID
        :param agg_dst: Destination aggregate ID
        :param hostname: Host name
        """
        aggs = self.aggregate_list()
        agg_src_id = None
        agg_dst_id = None
        for agg in aggs:
            if agg.name == agg_src:
                agg_src_id = agg.id
            if agg.name == agg_dst:
                agg_dst_id = agg.id
        if agg_src_id is not None and agg_dst_id is not None:
            self.aggregate_host_remove(agg_src_id, hostname)
            self.aggregate_host_add(agg_dst_id, hostname)
        else:
            logger.error(
                "Aggregate not found. agg_src id:{0}, agg_dst id:{1}".format(
                    agg_src_id, agg_dst_id
                ))

    def role_get(self, role_name):
        """Get user role by name.

        :param role_name: string role name
        :return role: dict with role description
        """
        try:
            role = self._common.keystone.roles.find(name=role_name)
        except NotFound:
            return None
        return role

    def user_get(self, username):
        """Get user by user name.

        :param username: string user name
        :return user: dict with user description
        """
        user = find_first(self._common.keystone.users.list(),
                          lambda x: x.name == username)
        return user

    @get_openstack_list_paginator()  # use pagination with default page size
    def tenants_list(self, limit=None, marker=None):
        """List tenants.

        :param limit: how many tenants to list
        :param marker: position from which to start listing tenants
        :return tenants: list with tenants
        """
        tenants = self._common.keystone.tenants.list(limit=limit,
                                                     marker=marker)
        return tenants

    def tenants_create(self, tenant_name):
        """Create tenant with given name.

        :param tenant_name: name of tenant
        :return tenant: dict with tenant details
        """
        tenant = find_first(self.tenants_list(),
                            lambda x: x.name == tenant_name)
        if tenant is None:
            tenant = self._common.keystone.tenants.create(
                tenant_name=tenant_name,
                enabled=True)
            logger.info("Created tenant name:'{0}', id:'{1}'".format(
                tenant_name, tenant.id))
        else:
            logger.warning(
                "Tenant already exists, name: '{0}' id: '{1}'".format(
                    tenant_name, tenant.id))
        return tenant

    def tenant_assign_user_role(self, tenant, user, role):
        """Link tenant with user and role.

        :param tenant: name of tenant
        :param user: name of user
        :param role: name of role
        :return res: dict keystone answer
        """
        res = self._common.keystone.roles.add_user_role(user, role, tenant)
        return res

    def security_group_create(self, name, description=''):
        """Create security group.

        :param name: name of security group
        :param description: string with description
        :return: security group object
        """
        return self._common.nova.security_groups.create(name, description)

    def security_group_add_rule(self, tenant_id, group):
        """Add rule to security group.

        :param tenant_id: tenant id
        :param group: security group object
        """
        body = {'security_group_rule': {'direction': 'ingress',
                                        'security_group_id': group.id,
                                        'tenant_id': tenant_id}
                }
        try:
            self._common.neutron.create_security_group_rule(body=body)
        except Conflict as e:
            logger.warning(
                "Can't create rule for tenant {0}. Exception: {1}".format(
                    tenant_id, e))
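For reference, a minimal usage sketch of the paginator and helper class above (illustration only, not part of the commit): it assumes a deployed environment with fuel-qa credentials, and the controller IP is a placeholder value.

# Hypothetical usage sketch of get_openstack_list_paginator / HopenStack.
from helpers.openstack import HopenStack

hos = HopenStack('10.109.1.2')           # nsxv_ip: controller IP (placeholder)
for tenant in hos.tenants_list():        # pages through keystone in chunks of 10
    print(tenant.name)

first_five = hos.tenants_list(limit=5)   # explicit limit/marker bypasses paging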
plugin_test/helpers/settings.py (new file, +71)
@@ -0,0 +1,71 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""

import os

from fuelweb_test.settings import get_var_as_bool
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE


HALF_MIN_WAIT = 30  # 30 seconds
WAIT_FOR_COMMAND = 60 * 3  # 3 minutes
WAIT_FOR_LONG_DEPLOY = 60 * 180  # 180 minutes

EXT_IP = '8.8.8.8'  # Google DNS ^_^
PRIVATE_NET = "admin_internal_net"
ADMIN_NET = 'admin_floating_net'
DEFAULT_ROUTER_NAME = 'router04'
METADATA_IP = '169.254.169.254'
VM_USER = 'cirros'
VM_PASS = 'cubswin:)'
AZ_VCENTER1 = 'vcenter'
AZ_VCENTER2 = 'vcenter2'
FLAVOR_NAME = 'm1.micro128'
CERT_FILE = "plugin_test/certificates/certificate.pem"
KEY_FILE = "plugin_test/certificates/key.pem"
CN = "metadata.nsx.local"


NSXT_PLUGIN_PATH = os.environ.get('NSXT_PLUGIN_PATH')

cluster_settings = {'net_provider': 'neutron',
                    'assign_to_all_nodes': True,
                    'net_segment_type': NEUTRON_SEGMENT_TYPE
                    }

plugin_configuration = {
    'insecure/value': get_var_as_bool(os.environ.get('NSXT_INSECURE'), True),
    'nsx_api_managers/value': os.environ.get('NSXT_MANAGERS_IP'),
    'nsx_api_user/value': os.environ.get('NSXT_USER'),
    'nsx_api_password/value': os.environ.get('NSXT_PASSWORD'),
    'default_overlay_tz_uuid/value': os.environ.get('NSXT_OVERLAY_TZ_UUID'),
    'default_vlan_tz_uuid/value': os.environ.get('NSXT_VLAN_TZ_UUID'),
    'default_tier0_router_uuid/value': os.environ.get(
        'NSXT_TIER0_ROUTER_UUID'),
    'default_edge_cluster_uuid/value': os.environ.get(
        'NSXT_EDGE_CLUSTER_UUID'),
    'uplink_profile_uuid/value': os.environ.get('NSXT_UPLINK_PROFILE_UUID'),
    'controller_ip_pool_uuid/value': os.environ.get(
        'NSXT_CONTROLLER_IP_POOL_UUID'),
    'controller_pnics_pairs/value':
        os.environ.get('NSXT_CONTROLLER_PNICS_PAIRS'),
    'compute_ip_pool_uuid/value': os.environ.get('NSXT_COMPUTE_IP_POOL_UUID'),
    'compute_pnics_pairs/value': os.environ.get('NSXT_COMPUTE_PNICS_PAIRS'),
    'floating_ip_range/value': os.environ.get('NSXT_FLOATING_IP_RANGE'),
    'floating_net_cidr/value': os.environ.get('NSXT_FLOATING_NET_CIDR'),
    'internal_net_cidr/value': os.environ.get('NSXT_INTERNAL_NET_CIDR'),
    'floating_net_gw/value': os.environ.get('NSXT_FLOATING_NET_GW'),
    'internal_net_dns/value': os.environ.get('NSXT_INTERNAL_NET_DNS'),
}
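Because plugin_configuration is built from NSXT_* environment variables at import time, those variables must be exported before the tests start. A minimal, hypothetical pre-flight check (not defined in this commit) could look like this; the variable names are taken from the dictionary above.

# Hypothetical pre-flight check for the required NSXT_* variables.
import os

required = ('NSXT_PLUGIN_PATH', 'NSXT_MANAGERS_IP', 'NSXT_USER', 'NSXT_PASSWORD')
missing = [name for name in required if not os.environ.get(name)]
if missing:
    raise RuntimeError('Missing environment variables: {0}'.format(missing))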
plugin_test/helpers/tools.py (new file, +54)
@@ -0,0 +1,54 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""

from functools import wraps

from fuelweb_test import logger


def find_first(seq, predicate):
    """Find next item from sequence."""
    return next((x for x in seq if predicate(x)), None)


class ShowPos(object):
    """Print func name and its parameters for each call."""

    @staticmethod
    def deco(f):
        """Logger decorator."""
        def wrapper(*args, **kwargs):
            logger.debug("Call {0}({1}, {2})".format(f.__name__, args, kwargs))
            return f(*args, **kwargs)
        return wrapper

    def __getattribute__(self, name):
        """Log by attributes."""
        attr = object.__getattribute__(self, name)
        if callable(attr):
            return ShowPos.deco(attr)
        else:
            return attr


def show_pos(f):
    """Wrapper shows current POSition in debug output."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        logger.debug('Call {func}({args}, {kwargs})'.format(func=f.__name__,
                                                            args=args,
                                                            kwargs=kwargs))
        return f(*args, **kwargs)
    return wrapper
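A short usage sketch of these helpers (illustration only, not part of the commit): any class that inherits ShowPos gets each method call logged through fuelweb_test's logger, and show_pos does the same for standalone functions. The names Demo and add below are hypothetical.

# Illustration only: Demo and add are made-up names.
from helpers.tools import ShowPos, show_pos

class Demo(ShowPos):
    def ping(self, host):
        return 'pong from {0}'.format(host)

@show_pos
def add(a, b):
    return a + b

Demo().ping('10.0.0.1')   # logs: Call ping(('10.0.0.1',), {})
add(1, 2)                 # logs: Call add((1, 2), {})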
plugin_test/run_tests.py (new file, +67)
@@ -0,0 +1,67 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import os
import re
import sys

from nose.plugins import Plugin
from paramiko.transport import _join_lingering_threads


class CloseSSHConnectionsPlugin(Plugin):
    """Closes all paramiko's ssh connections after each test case.

    The plugin works around proboscis' inability to run cleanup of any kind.
    'afterTest' calls the _join_lingering_threads function from paramiko,
    which stops all threads (sets the state to inactive and joins for 10s).
    """

    name = 'closesshconnections'

    def options(self, parser, env=os.environ):
        super(CloseSSHConnectionsPlugin, self).options(parser, env=env)

    def configure(self, options, conf):
        super(CloseSSHConnectionsPlugin, self).configure(options, conf)
        self.enabled = True

    def afterTest(self, *args, **kwargs):
        _join_lingering_threads()


def import_tests():
    from tests import test_plugin_nsxv


def run_tests():
    from proboscis import TestProgram  # noqa
    import_tests()

    # Run Proboscis and exit.
    TestProgram(
        addplugins=[CloseSSHConnectionsPlugin()]
    ).run_and_exit()


if __name__ == '__main__':
    sys.path.append(sys.path[0] + "/fuel-qa")
    import_tests()
    from fuelweb_test.helpers.patching import map_test
    if any(re.search(r'--group=patching_master_tests', arg)
           for arg in sys.argv):
        map_test('master')
    elif any(re.search(r'--group=patching.*', arg) for arg in sys.argv):
        map_test('environment')
    run_tests()
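The runner is normally driven by the Jenkins script added below, but a single proboscis group can also be launched directly. A hedged sketch, assuming the fuel-qa submodule and its requirements are already available on the PYTHONPATH:

# Hypothetical direct invocation of the runner for one test group.
import subprocess
import sys

subprocess.check_call(
    [sys.executable, 'plugin_test/run_tests.py', '--group=nsxt_smoke'])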
plugin_test/tests/__init__.py (new file, +14)
@@ -0,0 +1,14 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
plugin_test/tests/test_plugin_nsxv.py (new file, +215)
@@ -0,0 +1,215 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""

import os

from proboscis import test

from proboscis.asserts import assert_true

from fuelweb_test.helpers import utils

from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from helpers import settings as pt_settings  # Plugin Tests Settings


@test(groups=["plugins", "nsxt_plugin"])
class TestNSXtPlugin(TestBasic):
    """Automated tests from the test plan that are marked 'Automated'."""

    _common = None
    plugin_name = 'nsx-t'
    plugin_version = '1.0.0'

    def install_nsxt_plugin(self):
        """Install plugin on fuel node."""
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=pt_settings.NSXT_PLUGIN_PATH,
            tar_target='/var')

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(pt_settings.NSXT_PLUGIN_PATH))

    def node_name(self, name_node):
        """Return name of node."""
        return self.fuel_web.get_nailgun_node_by_name(name_node)['hostname']

    def enable_plugin(self, cluster_id, settings={}):
        """Fill the necessary fields with required values.

        :param cluster_id: cluster id to use with Common
        :param settings: dict that will be merged with default settings
        """
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, self.plugin_name),
            "Test aborted")

        # Update plugin settings
        self.fuel_web.update_plugin_settings(
            cluster_id,
            self.plugin_name,
            self.plugin_version,
            dict(pt_settings.plugin_configuration, **settings))

    @test(depends_on=[SetupEnvironment.prepare_slaves_1],
          groups=["nsxt_smoke"])
    @log_snapshot_after_test
    def nsxt_smoke(self):
        """Deploy a cluster with the NSXt plugin.

        Scenario:
            1. Upload the plugin to the master node.
            2. Create cluster.
            3. Provision one controller node.
            4. Configure NSXt for that cluster.
            5. Deploy cluster with plugin.
            6. Run 'smoke' OSTF.

        Duration 90 min

        """
        self.env.revert_snapshot('ready_with_1_slaves')

        self.install_nsxt_plugin()

        # Configure cluster
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=pt_settings.cluster_settings,
            configure_ssl=False)

        # Assign roles to nodes
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'], })

        self.enable_plugin(cluster_id)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['smoke'])

    @test(depends_on=[SetupEnvironment.prepare_slaves_1],
          groups=["nsxt_vcenter_smoke"])
    @log_snapshot_after_test
    def nsxt_vcenter_smoke(self):
        """Deploy a cluster with the NSXt plugin.

        Scenario:
            1. Upload the plugin to the master node.
            2. Create cluster.
            3. Provision one controller node.
            4. Configure vcenter.
            5. Configure NSXt for that cluster.
            6. Deploy cluster with plugin.
            7. Run 'smoke' OSTF.

        Duration 90 min

        """
        self.env.revert_snapshot('ready_with_1_slaves')

        self.install_nsxt_plugin()

        # Configure cluster
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=pt_settings.cluster_settings,
            configure_ssl=False)

        # Assign roles to nodes
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'], })

        # Configure VMware vCenter settings
        self.fuel_web.vcenter_configure(cluster_id)

        self.enable_plugin(cluster_id)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['smoke'])

    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
          groups=["nsxt_bvt"])
    @log_snapshot_after_test
    def nsxt_bvt(self):
        """Deploy cluster with plugin and vmware datastore backend.

        Scenario:
            1. Upload plugins to the master node.
            2. Install plugin.
            3. Create cluster with vcenter.
            4. Add 3 nodes with controller role, compute, 2 ceph,
               compute-vmware, cinder-vmware.
            5. Deploy cluster.
            6. Run OSTF.

        Duration 3 hours

        """
        self.env.revert_snapshot("ready_with_9_slaves")

        self.install_nsxt_plugin()

        settings = pt_settings.cluster_settings
        settings["images_ceph"] = True
        # Configure cluster
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=settings,
            configure_ssl=False)

        # Assign roles to nodes
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'],
             'slave-02': ['controller'],
             'slave-03': ['controller'],
             'slave-04': ['ceph-osd'],
             'slave-05': ['ceph-osd'],
             'slave-06': ['ceph-osd'],
             'slave-07': ['compute-vmware'],
             'slave-08': ['cinder-vmware'],
             'slave-09': ['compute'],
             })

        target_node_1 = self.node_name('slave-07')

        # Configure VMware vCenter settings
        self.fuel_web.vcenter_configure(cluster_id,
                                        multiclusters=True,
                                        target_node_1=target_node_1)

        self.enable_plugin(cluster_id)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['smoke', 'sanity', 'ha'],)
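For clarity, the settings merge in enable_plugin is a plain dict merge where the per-test overrides win over the defaults built from the NSXT_* environment variables. A tiny sketch with made-up values:

# Sketch of dict(pt_settings.plugin_configuration, **settings); values are examples.
defaults = {'nsx_api_user/value': 'admin', 'insecure/value': True}
overrides = {'insecure/value': False}
merged = dict(defaults, **overrides)
assert merged == {'nsx_api_user/value': 'admin', 'insecure/value': False}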
plugin_test/utils/jenkins/system_tests.sh (new executable file, +664)
@@ -0,0 +1,664 @@
#!/bin/sh
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

# functions

INVALIDOPTS_ERR=100
NOJOBNAME_ERR=101
NOISOPATH_ERR=102
NOTASKNAME_ERR=103
NOWORKSPACE_ERR=104
NOISOFOUND_ERR=107
CDWORKSPACE_ERR=110
ISODOWNLOAD_ERR=111
INVALIDTASK_ERR=112

# Defaults

export REBOOT_TIMEOUT=${REBOOT_TIMEOUT:-5000}
export ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT=${ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT:-true}

ShowHelp() {
cat << EOF
System Tests Script

It can perform several actions depending on the Jenkins JOB_NAME it is run from,
or it can take names from exported environment variables or command line options
if you do need to override them.

-w (dir)    - Path to workspace where fuelweb git repository was checked out.
              Uses Jenkins' WORKSPACE if not set
-e (name)   - Directly specify environment name used in tests
              Uses ENV_NAME variable if set.
-j (name)   - Name of this job. Determines ISO name, Task name and is used by tests.
              Uses Jenkins' JOB_NAME if not set
-v          - Do not use virtual environment
-V (dir)    - Path to python virtual environment
-i (file)   - Full path to ISO file to build or use for tests.
              Made from iso dir and name if not set.
-t (name)   - Name of task this script should perform. Should be one of defined ones.
              Taken from Jenkins' job's suffix if not set.
-o (str)    - Allows you to add any extra command line option to the test job if you
              want to use some parameters.
-a (str)    - Allows you to pass NOSE_ATTR to the test job if you want
              to use some parameters.
-A (str)    - Allows you to pass NOSE_EVAL_ATTR if you want to enter attributes
              as python expressions.
-m (name)   - Use this mirror to build ISO from.
              Uses 'srt' if not set.
-U          - ISO URL for tests.
              Null by default.
-r (yes/no) - Should the built ISO file be placed with a build number tag and
              symlinked to the last build, or just copied over the last file.
-b (num)    - Allows you to override Jenkins' build number if you need to.
-l (dir)    - Path to logs directory. Can be set by the LOGS_DIR environment variable.
              Uses WORKSPACE/logs if not set.
-d          - Dry run mode. Only show what would be done and do nothing.
              Useful for debugging.
-k          - Keep previously created test environment before tests run
-K          - Keep test environment after tests are finished
-h          - Show this help page

Most variables use guesses from the Jenkins job name but can be overridden
by an exported variable before the script is run or by one of the command line options.

You can override the following variables using export VARNAME="value" before running this script
WORKSPACE - path to directory where the Fuelweb repository was checked out by Jenkins or manually
JOB_NAME - name of the Jenkins job that determines which task should be done and the ISO file name.

If the task name is "iso" it will make an iso file
Other defined names will run Nose tests using a previously built ISO file.

ISO file name is taken from the job name prefix
Task name is taken from the job name suffix
Separator is one dot '.'

For example if JOB_NAME is:
mytest.somestring.iso
ISO name: mytest.iso
Task name: iso
If run with such a JOB_NAME, an iso file named mytest.iso will be created

If JOB_NAME is:
mytest.somestring.node
ISO name: mytest.iso
Task name: node
If the script is run with this JOB_NAME, node tests will use the ISO file mytest.iso.

First you should run the mytest.somestring.iso job to create mytest.iso.
Then you can run the mytest.somestring.node job to start tests using mytest.iso and other tests too.
EOF
}

GlobalVariables() {
    # where built iso's should be placed
    # use hardcoded default if not set before by export
    ISO_DIR="${ISO_DIR:=/var/www/fuelweb-iso}"

    # name of iso file
    # taken from jenkins job prefix
    # if not set before by variable export
    if [ -z "${ISO_NAME}" ]; then
        ISO_NAME="${JOB_NAME%.*}.iso"
    fi

    # full path where iso file should be placed
    # made from iso name and path to iso shared directory
    # if it was not overridden by options or export
    if [ -z "${ISO_PATH}" ]; then
        ISO_PATH="${ISO_DIR}/${ISO_NAME}"
    fi

    # what task should be run
    # it's taken from the jenkins job name suffix if not set by options
    if [ -z "${TASK_NAME}" ]; then
        TASK_NAME="${JOB_NAME##*.}"
    fi

    # do we want to keep iso's for each build or just copy over a single file
    ROTATE_ISO="${ROTATE_ISO:=yes}"

    # choose mirror to build iso from. Default is 'srt' for Saratov's mirror
    # you can change the mirror by exporting USE_MIRROR before running this script
    USE_MIRROR="${USE_MIRROR:=srt}"

    # only show what commands would be executed but do nothing
    # this feature is useful if you want to debug this script's behaviour
    DRY_RUN="${DRY_RUN:=no}"

    VENV="${VENV:=yes}"
}

GetoptsVariables() {
    while getopts ":w:j:i:t:o:a:A:m:U:r:b:V:l:dkKe:v:h" opt; do
        case $opt in
            w)
                WORKSPACE="${OPTARG}"
                ;;
            j)
                JOB_NAME="${OPTARG}"
                ;;
            i)
                ISO_PATH="${OPTARG}"
                ;;
            t)
                TASK_NAME="${OPTARG}"
                ;;
            o)
                TEST_OPTIONS="${TEST_OPTIONS} ${OPTARG}"
                ;;
            a)
                NOSE_ATTR="${OPTARG}"
                ;;
            A)
                NOSE_EVAL_ATTR="${OPTARG}"
                ;;
            m)
                USE_MIRROR="${OPTARG}"
                ;;
            U)
                ISO_URL="${OPTARG}"
                ;;
            r)
                ROTATE_ISO="${OPTARG}"
                ;;
            V)
                VENV_PATH="${OPTARG}"
                ;;
            l)
                LOGS_DIR="${OPTARG}"
                ;;
            k)
                KEEP_BEFORE="yes"
                ;;
            K)
                KEEP_AFTER="yes"
                ;;
            e)
                ENV_NAME="${OPTARG}"
                ;;
            d)
                DRY_RUN="yes"
                ;;
            v)
                VENV="no"
                ;;
            h)
                ShowHelp
                exit 0
                ;;
            \?)
                echo "Invalid option: -$OPTARG"
                ShowHelp
                exit $INVALIDOPTS_ERR
                ;;
            :)
                echo "Option -$OPTARG requires an argument."
                ShowHelp
                exit $INVALIDOPTS_ERR
                ;;
        esac
    done
}

CheckVariables() {

    if [ -z "${JOB_NAME}" ]; then
        echo "Error! JOB_NAME is not set!"
        exit $NOJOBNAME_ERR
    fi
    if [ -z "${ISO_PATH}" ]; then
        echo "Error! ISO_PATH is not set!"
        exit $NOISOPATH_ERR
    fi
    if [ -z "${TASK_NAME}" ]; then
        echo "Error! TASK_NAME is not set!"
        exit $NOTASKNAME_ERR
    fi
    if [ -z "${WORKSPACE}" ]; then
        echo "Error! WORKSPACE is not set!"
        exit $NOWORKSPACE_ERR
    fi

    if [ -z "${POOL_PUBLIC}" ]; then
        export POOL_PUBLIC='172.16.0.0/24:24'
    fi
    if [ -z "${POOL_MANAGEMENT}" ]; then
        export POOL_MANAGEMENT='172.16.1.0/24:24'
    fi
    if [ -z "${POOL_PRIVATE}" ]; then
        export POOL_PRIVATE='192.168.0.0/24:24'
    fi

    # Vcenter variables
    if [ -z "${DISABLE_SSL}" ]; then
        export DISABLE_SSL="true"
    fi
    if [ -z "${VCENTER_USE}" ]; then
        export VCENTER_USE="true"
    fi
    if [ -z "${VCENTER_IP}" ]; then
        export VCENTER_IP="172.16.0.254"
    fi
    if [ -z "${VCENTER_USERNAME}" ]; then
        export VCENTER_USERNAME="administrator@vsphere.local"
    fi
    if [ -z "${VCENTER_PASSWORD}" ]; then
        echo "Error! VCENTER_PASSWORD is not set!"
        exit 1
    fi
    if [ -z "${VC_DATACENTER}" ]; then
        export VC_DATACENTER="Datacenter"
    fi
    if [ -z "${VC_DATASTORE}" ]; then
        export VC_DATASTORE="nfs"
    fi
    if [ -z "${VCENTER_IMAGE_DIR}" ]; then
        export VCENTER_IMAGE_DIR="/openstack_glance"
    fi
    if [ -z "${WORKSTATION_NODES}" ]; then
        export WORKSTATION_NODES="esxi1 esxi2 esxi3 vcenter trusty nsx-edge"
    fi
    if [ -z "${WORKSTATION_IFS}" ]; then
        export WORKSTATION_IFS="vmnet1 vmnet2 vmnet5"
    fi
    if [ -z "${VCENTER_CLUSTERS}" ]; then
        export VCENTER_CLUSTERS="Cluster1,Cluster2"
    fi
    if [ -z "${WORKSTATION_SNAPSHOT}" ]; then
        echo "Error! WORKSTATION_SNAPSHOT is not set!"
        exit 1
    fi
    if [ -z "${WORKSTATION_USERNAME}" ]; then
        echo "Error! WORKSTATION_USERNAME is not set!"
        exit 1
    fi
    if [ -z "${WORKSTATION_PASSWORD}" ]; then
        echo "Error! WORKSTATION_PASSWORD is not set!"
        exit 1
    fi

    # NSXt variables
    if [ -z "${NSXT_PLUGIN_PATH}" ]; then
        echo "Error! NSXT_PLUGIN_PATH is not set!"
        exit 1
    fi
    if [ -z "${NEUTRON_SEGMENT_TYPE}" ]; then
        export NEUTRON_SEGMENT_TYPE="tun"
    fi
    if [ -z "${NSXT_INSECURE}" ]; then
        export NSXT_INSECURE='true'
    fi
    if [ -z "${NSXT_MANAGERS_IP}" ]; then
        export NSXT_MANAGERS_IP="172.16.0.249"
    fi
    if [ -z "${NSXT_USER}" ]; then
        export NSXT_USER='admin'
    fi
    if [ -z "${NSXT_PASSWORD}" ]; then
        echo "Error! NSXT_PASSWORD is not set!"
        exit 1
    fi
    if [ -z "${NSXT_OVERLAY_TZ_UUID}" ]; then
        export NSXT_OVERLAY_TZ_UUID='0eeb1b85-c826-403d-8762-6a9c23a4f132'
    fi
    if [ -z "${NSXT_VLAN_TZ_UUID}" ]; then
        export NSXT_VLAN_TZ_UUID='8efe20d2-e71a-4d6e-acdd-f78a2ec2e90c'
    fi
    if [ -z "${NSXT_TIER0_ROUTER_UUID}" ]; then
        export NSXT_TIER0_ROUTER_UUID='606acd01-c5f8-40ea-ae20-9a91eb7ebcb4'
    fi
    if [ -z "${NSXT_EDGE_CLUSTER_UUID}" ]; then
        export NSXT_EDGE_CLUSTER_UUID='c53d602a-4010-47cc-a8b1-4ef11d0a3edd'
    fi
    if [ -z "${NSXT_UPLINK_PROFILE_UUID}" ]; then
        export NSXT_UPLINK_PROFILE_UUID='99864272-b34f-46a5-89c8-5657fa7042ea'
    fi
    if [ -z "${NSXT_CONTROLLER_IP_POOL_UUID}" ]; then
        export NSXT_CONTROLLER_IP_POOL_UUID='2e06fcb2-7c5b-4515-a7a9-98809c7b863a'
    fi
    if [ -z "${NSXT_CONTROLLER_PNICS_PAIRS}" ]; then
        export NSXT_CONTROLLER_PNICS_PAIRS='enp0s6:uplink'
    fi
    if [ -z "${NSXT_COMPUTE_IP_POOL_UUID}" ]; then
        export NSXT_COMPUTE_IP_POOL_UUID='2e06fcb2-7c5b-4515-a7a9-98809c7b863a'
    fi
    if [ -z "${NSXT_COMPUTE_PNICS_PAIRS}" ]; then
        export NSXT_COMPUTE_PNICS_PAIRS='enp0s6:uplink'
    fi

    if [ -z "${NSXT_FLOATING_IP_RANGE}" ]; then
        export NSXT_FLOATING_IP_RANGE='172.16.212.2-172.16.212.40'
    fi
    if [ -z "${NSXT_FLOATING_NET_CIDR}" ]; then
        export NSXT_FLOATING_NET_CIDR='172.16.212.0/24'
    fi
    if [ -z "${NSXT_ROUTING_NET_CIDR}" ]; then
        export NSXT_ROUTING_NET_CIDR='172.16.214.0/30'
    fi
    if [ -z "${NSXT_FLOATING_NET_GW}" ]; then
        export NSXT_FLOATING_NET_GW='172.16.212.1'
    fi
    if [ -z "${NSXT_INTERNAL_NET_CIDR}" ]; then
        export NSXT_INTERNAL_NET_CIDR='192.168.251.0/24'
    fi
    if [ -z "${NSXT_INTERNAL_NET_DNS}" ]; then
        export NSXT_INTERNAL_NET_DNS='8.8.8.8'
    fi

    # Export settings
    if [ -z "${NODE_VOLUME_SIZE}" ]; then
        export NODE_VOLUME_SIZE=350
    fi
    if [ -z "${ADMIN_NODE_MEMORY}" ]; then
        export ADMIN_NODE_MEMORY=4096
    fi
    if [ -z "${ADMIN_NODE_CPU}" ]; then
        export ADMIN_NODE_CPU=4
    fi
    if [ -z "${SLAVE_NODE_MEMORY}" ]; then
        export SLAVE_NODE_MEMORY=4096
    fi
    if [ -z "${SLAVE_NODE_CPU}" ]; then
        export SLAVE_NODE_CPU=4
    fi
}

CdWorkSpace() {
    # chdir into workspace or fail if we could not
    if [ "${DRY_RUN}" != "yes" ]; then
        cd "${WORKSPACE}"
        ec=$?

        if [ "${ec}" -gt "0" ]; then
            echo "Error! Cannot cd to WORKSPACE!"
            exit $CDWORKSPACE_ERR
        fi
    else
        echo cd "${WORKSPACE}"
    fi
}

RunTest() {
    # Run test selected by task name

    # check if the iso file exists
    if [ ! -f "${ISO_PATH}" ]; then
        if [ -z "${ISO_URL}" -a "${DRY_RUN}" != "yes" ]; then
            echo "Error! File ${ISO_PATH} not found and no ISO_URL (-U key) for downloading!"
            exit $NOISOFOUND_ERR
        else
            if [ "${DRY_RUN}" = "yes" ]; then
                echo wget -c ${ISO_URL} -O ${ISO_PATH}
            else
                echo "No ${ISO_PATH} found. Trying to download file."
                wget -c ${ISO_URL} -O ${ISO_PATH}
                rc=$?
                if [ $rc -ne 0 ]; then
                    echo "Failed to fetch ISO from ${ISO_URL}"
                    exit $ISODOWNLOAD_ERR
                fi
            fi
        fi
    fi

    if [ -z "${VENV_PATH}" ]; then
        VENV_PATH="/home/jenkins/venv-nailgun-tests"
    fi

    # activate the python virtualenv
    if [ "${VENV}" = "yes" ]; then
        if [ "${DRY_RUN}" = "yes" ]; then
            echo . $VENV_PATH/bin/activate
        else
            . $VENV_PATH/bin/activate
        fi
    fi

    if [ "${ENV_NAME}" = "" ]; then
        ENV_NAME="${JOB_NAME}_system_test"
    fi

    if [ "${LOGS_DIR}" = "" ]; then
        LOGS_DIR="${WORKSPACE}/logs"
    fi

    if [ ! -f "$LOGS_DIR" ]; then
        mkdir -p $LOGS_DIR
    fi

    export ENV_NAME
    export LOGS_DIR
    export ISO_PATH

    if [ "${KEEP_BEFORE}" != "yes" ]; then
        # remove the previous environment
        if [ "${DRY_RUN}" = "yes" ]; then
            echo dos.py erase "${ENV_NAME}"
        else
            if dos.py list | grep -q "^${ENV_NAME}\$" ; then
                dos.py erase "${ENV_NAME}"
            fi
        fi
    fi

    # gather additional options for this nose test run
    OPTS=""
    if [ -n "${NOSE_ATTR}" ]; then
        OPTS="${OPTS} -a ${NOSE_ATTR}"
    fi
    if [ -n "${NOSE_EVAL_ATTR}" ]; then
        OPTS="${OPTS} -A ${NOSE_EVAL_ATTR}"
    fi
    if [ -n "${TEST_OPTIONS}" ]; then
        OPTS="${OPTS} ${TEST_OPTIONS}"
    fi

    clean_old_bridges

    # run the python test set to create environments, deploy and test the product
    if [ "${DRY_RUN}" = "yes" ]; then
        echo export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}"
        echo python plugin_test/run_tests.py -q --nologcapture --with-xunit ${OPTS}
    else
        export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}"
        echo ${PYTHONPATH}
        python plugin_test/run_tests.py -q --nologcapture --with-xunit ${OPTS} &

    fi

    SYSTEST_PID=$!

    if ! ps -p $SYSTEST_PID > /dev/null
    then
        echo System tests exited prematurely, aborting
        exit 1
    fi

    while [ "$(virsh net-list | grep -c $ENV_NAME)" -ne 5 ];do sleep 10
        if ! ps -p $SYSTEST_PID > /dev/null
        then
            echo System tests exited prematurely, aborting
            exit 1
        fi
    done
    sleep 10


    # Configure vcenter nodes and interfaces
    setup_net $ENV_NAME
    clean_iptables
    setup_stt $ENV_NAME
    setup_external_net

    revert_ws "$WORKSTATION_NODES" || { echo "killing $SYSTEST_PID and its children" && pkill --parent $SYSTEST_PID && kill $SYSTEST_PID && exit 1; }

    echo waiting for system tests to finish
    wait $SYSTEST_PID

    export RES=$?
    echo ENVIRONMENT NAME is $ENV_NAME
    virsh net-dumpxml ${ENV_NAME}_admin | grep -P "(\d+\.){3}" -o | awk '{print "Fuel master node IP: "$0"2"}'

    if [ "${KEEP_AFTER}" != "yes" ]; then
        # remove the environment after tests
        if [ "${DRY_RUN}" = "yes" ]; then
            echo dos.py destroy "${ENV_NAME}"
        else
            dos.py destroy "${ENV_NAME}"
        fi
    fi

    exit "${RES}"
}

RouteTasks() {
    # this selector defines task names that are recognised by this script
    # and runs the corresponding jobs for them
    # running any job should exit this script

    case "${TASK_NAME}" in
        test)
            RunTest
            ;;
        iso)
            MakeISO
            ;;
        *)
            echo "Unknown task: ${TASK_NAME}!"
            exit $INVALIDTASK_ERR
            ;;
    esac
    exit 0
}

add_interface_to_bridge() {
    env=$1
    net_name=$2
    nic=$3
    ip=$4

    for net in $(virsh net-list |grep ${env}_${net_name} |awk '{print $1}');do
        bridge=$(virsh net-info $net |grep -i bridge |awk '{print $2}')
        setup_bridge $bridge $nic $ip && echo $net_name bridge $bridge ready
    done
}

setup_bridge() {
    bridge=$1
    nic=$2
    ip=$3

    sudo /sbin/brctl stp $bridge off
    sudo /sbin/brctl addif $bridge $nic
    # set interfaces that already hold this ip down
    for itf in $(sudo ip -o addr show to $ip |cut -d' ' -f2); do
        echo deleting $ip from $itf
        sudo ip addr del dev $itf $ip
    done
    echo adding $ip to $bridge
    sudo /sbin/ip addr add $ip dev $bridge
    echo $nic added to $bridge
    sudo /sbin/ip link set dev $bridge up
    if sudo /sbin/iptables-save |grep $bridge | grep -i reject| grep -q FORWARD;then
        sudo /sbin/iptables -D FORWARD -o $bridge -j REJECT --reject-with icmp-port-unreachable
        sudo /sbin/iptables -D FORWARD -i $bridge -j REJECT --reject-with icmp-port-unreachable
    fi
}

clean_old_bridges() {
    for intf in $WORKSTATION_IFS; do
        for br in $(/sbin/brctl show | grep -v "bridge name" | cut -f1 -d' '); do
            /sbin/brctl show $br| grep -q $intf && sudo /sbin/brctl delif $br $intf \
                && sudo /sbin/ip link set dev $br down && echo $intf deleted from $br
        done
    done
}

clean_iptables() {
    sudo /sbin/iptables -F
    sudo /sbin/iptables -t nat -F
    sudo /sbin/iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
}

revert_ws() {
    for i in $1
    do
        vmrun -T ws-shared -h https://localhost:443/sdk -u $WORKSTATION_USERNAME -p $WORKSTATION_PASSWORD listRegisteredVM | grep -q $i || { echo "VM $i does not exist"; continue; }
        echo vmrun: reverting $i to $WORKSTATION_SNAPSHOT
        vmrun -T ws-shared -h https://localhost:443/sdk -u $WORKSTATION_USERNAME -p $WORKSTATION_PASSWORD revertToSnapshot "[standard] $i/$i.vmx" $WORKSTATION_SNAPSHOT || { echo "Error: revert of $i failed"; return 1; }
    done

    for i in $1
    do
        echo vmrun: starting $i
        vmrun -T ws-shared -h https://localhost:443/sdk -u $WORKSTATION_USERNAME -p $WORKSTATION_PASSWORD start "[standard] $i/$i.vmx" || { echo "Error: $i failed to start"; return 1; }
    done
}

setup_net() {
    env=$1
    add_interface_to_bridge $env public vmnet1 172.16.0.1/24
}

setup_stt() {
    set -e
    env=$1
    net_name='private'
    nic='vmnet2'

    for net in $(virsh net-list |grep ${env}_${net_name} |awk '{print $1}');do
        bridge=$(virsh net-info $net | grep -i bridge | awk '{print $2}')
    done
    sudo /sbin/brctl stp $bridge off
    sudo /sbin/brctl addif $bridge $nic
    echo $nic added to $bridge
    sudo /sbin/ip link set dev $bridge up
    if sudo /sbin/iptables-save |grep $bridge | grep -i reject| grep -q FORWARD;then
        sudo /sbin/iptables -D FORWARD -o $bridge -j REJECT --reject-with icmp-port-unreachable
        sudo /sbin/iptables -D FORWARD -i $bridge -j REJECT --reject-with icmp-port-unreachable
    fi

    echo "Stt added to $net_name bridge $bridge"
}

setup_external_net() {
    nic='vmnet5'

    ip=${NSXT_ROUTING_NET_CIDR%\.*}.1
    gw_ip=${NSXT_ROUTING_NET_CIDR%\.*}.2
    mask=${NSXT_ROUTING_NET_CIDR##*\/}

    # set interfaces that already hold this ip down
    for itf in $(sudo ip -o addr show to $ip | cut -d' ' -f2); do
        echo deleting $ip from $itf
        sudo ip addr del $ip/$mask dev $itf
    done

    for itf in $(sudo ip -o ro show to ${NSXT_FLOATING_NET_CIDR} | cut -d' ' -f3); do
        echo deleting route to ${NSXT_FLOATING_NET_CIDR} dev $itf
        sudo ip ro del ${NSXT_FLOATING_NET_CIDR} dev $itf
    done

    set -e
    sudo /sbin/ip addr add ${ip}/${mask} dev $nic
    sudo /sbin/ip ro add ${NSXT_FLOATING_NET_CIDR} via ${gw_ip}
    echo "Routing net added to $nic"
}

# MAIN

# first we want to get variables from the command line options
GetoptsVariables "${@}"

# then we define global variables and their defaults when needed
GlobalVariables

# check that we have all critical variables set
CheckVariables

# next we chdir into our working directory unless this is a dry run
CdWorkSpace

# finally we can choose what to do according to TASK_NAME
RouteTasks
plugin_test/utils/local_build_plugin.sh (new executable file, +25)
@@ -0,0 +1,25 @@
#!/bin/bash -e

ROOT="$(dirname $(readlink -f $0))/../../"
UBUNTU_REPO_DIR="$ROOT/repositories/ubuntu"
TMP_DIR="$(mktemp -d)"
TMP_ARCHIVE="${TMP_DIR}/deb.zip"
# REPO_PATH is taken from an env variable

# get the latest successful nsxv neutron plugin build
find "$UBUNTU_REPO_DIR" -name "python-vmware-nsx*.deb" -delete
wget --no-check-certificate -O "$TMP_ARCHIVE" "$REPO_PATH"
unzip "$TMP_ARCHIVE" -d "$TMP_DIR"
find "$TMP_DIR" -name "*.deb" -exec mv {} "$UBUNTU_REPO_DIR" \;
rm -rf "${TMP_DIR:?}"

# check puppet manifests; 'while' is needed to exit if an error is found
find "$ROOT/deployment_scripts/puppet/modules/nsxv" -type f -name "*.pp"| while read manifest ; do
    puppet parser validate "$manifest"
done
find "$ROOT/deployment_scripts/puppet/manifests" -type f -name "*.pp" | while read manifest ; do
    puppet parser validate "$manifest"
done

# build plugin
fpb --build "$ROOT"
spec/conf.py (modified, 28 lines)
@@ -1,19 +1,17 @@
-# -*- coding: utf-8 -*-
-#
-# Fuel NSXv plugin documentation build configuration file, created by
-# sphinx-quickstart on Fri Aug 14 12:14:29 2015.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys
-import os
-
+"""Copyright 2016 Mirantis, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+"""
+
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the