Add support for podified environment

In order to support a podified environment, the following new config
options were added:
- proxy_host_address
- proxy_host_user
- proxy_host_key_data (format illustrated below)
- proxy_host_inventory_path
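
Of these, proxy_host_key_data has the least obvious format: it is a one-line
JSON dict such as {"key": "<key body>"}, where the body is the private key
without its BEGIN/END lines and with every newline encoded as "\n". A minimal
sketch of producing such a value from an existing OpenSSH key file (the helper
name make_proxy_host_key_data is hypothetical, not part of the plugin):

    import json

    def make_proxy_host_key_data(key_path):
        # Read the OpenSSH private key and drop the BEGIN/END marker lines;
        # the plugin re-adds them when it reconstructs the key (see
        # _get_podified_proxy_host_key in base.py below).
        with open(key_path) as key_file:
            lines = key_file.read().splitlines(keepends=True)
        body = ''.join(lines[1:-1])
        # json.dumps escapes the real newlines as the two-character sequence
        # "\n", giving a single-line value suitable for the config file.
        return json.dumps({'key': body})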

base.py contains the following changes:
- Enhanced node discovery to get missing info from the inventory file.
- Implemented OVN DB access through the OpenShift client (oc) on a proxy
  host.
- The check_service_setting function can now check a list of config files
  inside a pod; the pod is accessed through a proxy host (see the usage
  sketch below).
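
A minimal usage sketch for that last item, assuming a podified deployment
where proxy_host_client is already set up; the section and param values are
illustrative examples, not taken from this change:

    # Skip the test when the expected value is not found in any of the
    # neutron config files inside the neutron pod.
    config_files = self.get_configs_of_service('neutron')
    self.check_service_setting(
        host={'client': self.proxy_host_client},
        service='neutron',
        config_files=config_files,
        section='ovn',
        param='enable_distributed_floating_ip',
        value='True',
        msg='Distributed floating IPs are not enabled')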

Change-Id: Idb3bc9d1a304b6e7a77393580c110ed08e863a90
Roman Safronov 2024-03-17 14:18:15 +02:00
parent 46de9d814c
commit 2a5b9be923
2 changed files with 205 additions and 24 deletions


@@ -90,5 +90,22 @@ WhiteboxNeutronPluginOptions = [
     cfg.IntOpt('ovn_max_controller_gw_ports_per_router',
                default=1,
                help='The number of network nodes used '
-                    'for the OVN router HA.')
+                    'for the OVN router HA.'),
+    cfg.StrOpt('proxy_host_address',
+               default='',
+               help='Intermediate host to run commands on podified '
+                    'environment'),
+    cfg.StrOpt('proxy_host_user',
+               default='',
+               help='User of intermediate host to run commands on podified '
+                    'environment'),
+    cfg.StrOpt('proxy_host_key_data',
+               default='{}',
+               help='Key data for accessing intermediate host on podified '
+                    'environment, in dict format, i.e. {"key":"key_data"}.'
+                    'The key_data should be key without first and last line '
+                    'and all new lines replaced by \n'),
+    cfg.StrOpt('proxy_host_inventory_path',
+               default='',
+               help='Nodes inventory on proxy host on podified environment')
 ]
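
For orientation before the base.py diff, a condensed sketch of how these
options are consumed during test setup; this mirrors the class setup change
shown below rather than introducing new API:

    if WB_CONF.openstack_type == 'podified':
        # Rebuild the private key from proxy_host_key_data and open an SSH
        # connection to the proxy host; 'oc' commands are run through it.
        cls.proxy_host_key = cls._get_podified_proxy_host_key()
        cls.proxy_host_client = cls.get_node_client(
            host=WB_CONF.proxy_host_address,
            username=WB_CONF.proxy_host_user,
            pkey=cls.proxy_host_key)
        # proxy_host_inventory_path is read from the proxy host later, in
        # get_podified_nodes_data(), to discover compute and OCP nodes.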

base.py

@@ -13,9 +13,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 import base64
+import json
+import os
 import random
 import re
 import time
+import yaml
 
 import netaddr
 from netifaces import AF_INET
@@ -66,8 +69,13 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
             cls.neutron_api_prefix = ''
             cls.neutron_conf = WB_CONF.neutron_config
         elif WB_CONF.openstack_type == 'podified':
-            # NOTE(mblue): add podified option
-            pass
+            cls.proxy_host_key = cls._get_podified_proxy_host_key()
+            cls.proxy_host_client = cls.get_node_client(
+                host=WB_CONF.proxy_host_address,
+                username=WB_CONF.proxy_host_user,
+                pkey=f"{cls.proxy_host_key}")
+            cls.master_node_client = cls.proxy_host_client
+            cls.master_cont_cmd_executor = cls.proxy_host_client
         else:
             LOG.warning(("Unrecognized deployer tool '{}', plugin supports "
                          "openstack_type as devstack/podified."
@@ -75,11 +83,13 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
 
     @classmethod
     def run_on_master_controller(cls, cmd):
+        if WB_CONF.openstack_type == 'podified':
+            output = cls.proxy_host_client.exec_command(cmd)
         if WB_CONF.openstack_type == 'devstack':
             output, errors = local_utils.run_local_cmd(cmd)
             LOG.debug("Stderr: {}".format(errors.decode()))
             output = output.decode()
         LOG.debug("Output: {}".format(output))
         return output.strip()
 
     def get_host_for_server(self, server_id):
@@ -98,10 +108,14 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
         return subnet['gateway_ip']
 
     @staticmethod
-    def get_node_client(host):
-        return ssh.Client(
-            host=host, username=WB_CONF.overcloud_ssh_user,
-            key_filename=WB_CONF.overcloud_key_file)
+    def get_node_client(
+            host, username=WB_CONF.overcloud_ssh_user, pkey=None,
+            key_filename=WB_CONF.overcloud_key_file):
+        if pkey:
+            return ssh.Client(host=host, username=username, pkey=pkey)
+        else:
+            return ssh.Client(host=host, username=username,
+                              key_filename=key_filename)
 
     def get_local_ssh_client(self, network):
         return ssh.Client(
@@ -128,9 +142,61 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
                 return ip_address
         return None
 
+    def get_fip_port_details(self, fip):
+        fip_ports = self.os_admin.network_client.list_ports(
+            network_id=CONF.network.public_network_id,
+            device_owner=constants.DEVICE_OWNER_FLOATINGIP)['ports']
+        for fp in fip_ports:
+            if (fp.get('fixed_ips') and len(fp['fixed_ips']) != 0 and
+                    fp['fixed_ips'][0]['ip_address'] ==
+                    fip['floating_ip_address']):
+                return fp
+        return None
+
+    @classmethod
+    def get_podified_nodes_data(cls):
+        def append_node_data(node, node_group_data):
+            if 'ocp' in node:
+                node_name = node.replace("ocp", "master")
+                key = 'ansible_ssh_private_key_file'  # meaning dict key here
+                # save path of ocp nodes key (if not yet), we'll need it later
+                if not hasattr(cls, 'ocp_nodes_key_path'):
+                    cls.ocp_nodes_key_path = (
+                        node_group_data[node][key].replace(
+                            '~', '/home/{}'.format(WB_CONF.proxy_host_user)))
+                node_key = node_group_data[node][key].split('/')[-1]
+            else:
+                node_name = node
+                node_key = 'id_cifw_key'
+            node_data = {
+                'name': node_name,
+                'ip': node_group_data[node]['ansible_host'],
+                'user': node_group_data[node]['ansible_user'],
+                'key': node_key}
+            nodes.append(node_data)
+
+        nodes = []
+        inventory_data = yaml.safe_load(
+            cls.proxy_host_client.exec_command(
+                'cat ' + WB_CONF.proxy_host_inventory_path))
+        computes_data = inventory_data['computes']['hosts']
+        for node in computes_data:
+            append_node_data(node, computes_data)
+        ocps_data = inventory_data['ocps']['hosts']
+        for node in ocps_data:
+            append_node_data(node, ocps_data)
+        return nodes
+
+    @classmethod
+    def _get_podified_proxy_host_key(cls):
+        start = '-----BEGIN OPENSSH PRIVATE KEY-----\n'
+        end = '-----END OPENSSH PRIVATE KEY-----\n'
+        key = json.loads(WB_CONF.proxy_host_key_data)['key']
+        return '{}{}{}'.format(start, key, end)
+
     @classmethod
     def append_node(cls, host, is_compute=False, is_networker=False):
         hostname = host.split('.')[0]
         for node in cls.nodes:
             if node['name'] == hostname:
@@ -139,12 +205,23 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
                 if not node['is_compute']:
                     node['is_compute'] = is_compute
                 return
-        node = {'name': hostname,
-                'client': cls.get_node_client(host),
-                'is_networker': is_networker,
-                'is_controller': False,
-                'is_compute': is_compute}
+        if WB_CONF.openstack_type == 'podified':
+            for node in cls.nodes_data:
+                LOG.debug(
+                    "hostname='{}', name='{}'".format(hostname, node['name']))
+                if node['name'] == hostname:
+                    extra_params = {
+                        'client': cls.get_node_client(
+                            host=node['ip'], username=node['user'],
+                            pkey=f"{cls.keys_data[node['key']]}")}
+                    break
+        else:
+            extra_params = {'client': cls.get_node_client(host)}
+        params = {'name': hostname,
+                  'is_networker': is_networker,
+                  'is_controller': False,
+                  'is_compute': is_compute}
+        node = {**params, **extra_params}
         # Here we are checking if there are controller-specific
         # processes running on the node
         output = node['client'].exec_command(
@@ -155,6 +232,12 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
 
     @classmethod
     def discover_nodes(cls):
+        if WB_CONF.openstack_type == 'podified':
+            cls.nodes_data = cls.get_podified_nodes_data()
+            cls.keys_data = {
+                'id_cifw_key': cls.proxy_host_key,
+                'devscripts_key': cls.proxy_host_client.exec_command(
+                    'cat ' + cls.ocp_nodes_key_path)}
         agents = cls.os_admin.network.AgentsClient().list_agents()['agents']
         if cls.has_ovn_support:
             l3_agent_hosts = [
@@ -173,6 +256,69 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
         for host in l3_agent_hosts:
             cls.append_node(host, is_networker=True)
 
+    @classmethod
+    def get_pod_of_service(cls, service='neutron'):
+        # (rsafrono) at this moment only neutron service pod handled
+        # since it's the only that existing tests are using
+        if service == 'neutron':
+            return cls.proxy_host_client.exec_command(
+                "oc get pods | grep neutron | grep -v meta | "
+                "cut -d' ' -f1").strip()
+
+    @classmethod
+    def get_configs_of_service(cls, service='neutron'):
+        # (rsafrono) at this moment only neutron configs were handled
+        # since it's the only service that existing tests are using
+        if service == 'neutron':
+            pod = cls.get_pod_of_service(service)
+            return cls.proxy_host_client.exec_command(
+                'oc rsh {} find {} -type f'.format(pod, os.path.split(
+                    WB_CONF.neutron_config)[0])).strip().split('\n')
+
+    @classmethod
+    def check_service_setting(
+            cls, host, service='neutron', config_files=None,
+            section='DEFAULT', param='', value='True',
+            msg='Required config value is missing', skip_if_fails=True):
+        """Check if a service on a node has a setting with a value in config
+
+        :param node(dict): Dictionary with host-related parameters,
+                           host['client'] is a required parameter
+        :param service(str): Name of the containerized service.
+        :param config_files(list): List with paths to config files. List makes
+                                   sense on podified where e.g. neutron has
+                                   2 config files with same sections.
+        :param section(str): Section in the config file.
+        :param value(str): Expected value.
+        :param msg(str): Message to print in case of expected value not found
+        :param skip_if_fails(bool): skip if the check fails - if it fails and
+                                    skip_if_fails is False, return False.
+        """
+        if WB_CONF.openstack_type == 'podified':
+            service_prefix = "oc rsh {}".format(
+                cls.get_pod_of_service(service))
+        else:
+            service_prefix = ""
+        cmd_prefix = "crudini --get"
+        for config_file in config_files:
+            setting = "{} {} {}".format(config_file, section, param)
+            cmd = "{} {} {} || true".format(
+                service_prefix, cmd_prefix, setting)
+            LOG.debug("Command = '{}'".format(cmd))
+            result = host['client'].exec_command(cmd)
+            LOG.debug("Result = '{}'".format(result))
+            if value in result:
+                return True
+            else:
+                continue
+        if skip_if_fails:
+            raise cls.skipException(msg)
+        else:
+            return False
+
     def _create_server_for_topology(
             self, network_id=None, port_type=None,
             different_host=None, port_qos_policy_id=None):
@@ -458,22 +604,40 @@ class BaseTempestTestCaseOvn(BaseTempestWhiteboxTestCase):
 
     @classmethod
     def _get_ovn_db_monitor_cmds(cls):
-        regex = r'--db=(.*)$'
-        # this regex search will return the connection string (tcp:IP:port or
-        # ssl:IP:port) and in case of TLS, will also include the TLS options
-        nb_monitor_connection_opts = re.search(regex, cls.nbctl).group(1)
-        sb_monitor_connection_opts = re.search(regex, cls.sbctl).group(1)
         monitorcmdprefix = 'sudo timeout 300 ovsdb-client monitor -f json '
-        return (monitorcmdprefix + nb_monitor_connection_opts,
-                monitorcmdprefix + sb_monitor_connection_opts)
+        if WB_CONF.openstack_type == 'podified':
+            # (rsafrono) still need to re-check if works properly
+            nb_monitor_connection_opts = cls.nbctl.replace(
+                'ovn-nbctl', '{} punix:/tmp/ovnnb_db.sock'.format(
+                    monitorcmdprefix.replace('sudo', '')))
+            sb_monitor_connection_opts = cls.sbctl.replace(
+                'ovn-sbctl', '{} punix:/tmp/ovsnb_db.sock'.format(
+                    monitorcmdprefix.replace('sudo', '')))
+            return (nb_monitor_connection_opts, sb_monitor_connection_opts)
+        if WB_CONF.openstack_type == 'devstack':
+            regex = r'--db=(.*)$'
+            # this regex search will return the connection string
+            # (tcp:IP:port or ssl:IP:port) and in case of TLS,
+            # will also include the TLS options
+            nb_monitor_connection_opts = re.search(regex, cls.nbctl).group(1)
+            sb_monitor_connection_opts = re.search(regex, cls.sbctl).group(1)
+            return (monitorcmdprefix + nb_monitor_connection_opts,
+                    monitorcmdprefix + sb_monitor_connection_opts)
 
     @classmethod
     def _get_ovn_dbs(cls):
+        if WB_CONF.openstack_type == 'podified':
+            sb_pod = cls.proxy_host_client.exec_command(
+                "oc get pods | grep ovsdbserver-sb | cut -f1 -d' '").strip()
+            sb_prefix = 'oc rsh {}'.format(sb_pod)
+            nb_prefix = sb_prefix.replace('sb', 'nb')
+            cmd = "{} ovn-{}ctl"
+            return [cmd.format(nb_prefix, 'nb'), cmd.format(sb_prefix, 'sb')]
         if WB_CONF.openstack_type == 'devstack':
             sbdb = "unix:/usr/local/var/run/ovn/ovnsb_db.sock"
             nbdb = sbdb.replace('sb', 'nb')
-            cmd = ("sudo ovn-{}ctl --db={}")
+            cmd = "sudo ovn-{}ctl --db={}"
             return [cmd.format('nb', nbdb), cmd.format('sb', sbdb)]
 
     def get_router_gateway_chassis(self, router_port_id):
         cmd = "{} get port_binding cr-lrp-{} chassis".format(