Specify openstack namespace in oc commands

Recent versions of the podified deployment do not have contexts
configured in .kube/config, so 'oc' commands do not work
as expected. This patch adds the openstack namespace parameter
when running 'oc' commands.
Additionally, on recent versions of the podified deployment
compute nodes have names with the domain name included. This
caused tests that capture traffic on nodes to behave incorrectly.
This patch fixes that.

Change-Id: I888effbdc894a9e65bd0e92164456db096bb4875
This commit is contained in:
Roman Safronov 2024-04-16 21:42:04 +03:00
parent b74274bd67
commit 6feb524aa3
3 changed files with 32 additions and 19 deletions

View File

@ -73,6 +73,7 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
cls.master_cont_cmd_executor = cls.run_on_master_controller cls.master_cont_cmd_executor = cls.run_on_master_controller
cls.neutron_api_prefix = '' cls.neutron_api_prefix = ''
elif WB_CONF.openstack_type == 'podified': elif WB_CONF.openstack_type == 'podified':
cls.OC = "oc -n openstack "
cls.proxy_host_client = cls.get_node_client( cls.proxy_host_client = cls.get_node_client(
host=WB_CONF.proxy_host_address, host=WB_CONF.proxy_host_address,
username=WB_CONF.proxy_host_user, username=WB_CONF.proxy_host_user,
@ -83,8 +84,8 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
WB_CONF.kubeconfig_path, WB_CONF.proxy_host_user)) WB_CONF.kubeconfig_path, WB_CONF.proxy_host_user))
cls.master_node_client = cls.proxy_host_client cls.master_node_client = cls.proxy_host_client
cls.master_cont_cmd_executor = cls.proxy_host_client.exec_command cls.master_cont_cmd_executor = cls.proxy_host_client.exec_command
cls.neutron_api_prefix = 'oc rsh {} '.format( cls.neutron_api_prefix = '{} rsh {} '.format(
cls.get_pod_of_service()) cls.OC, cls.get_pod_of_service())
else: else:
LOG.warning(("Unrecognized deployer tool '{}', plugin supports " LOG.warning(("Unrecognized deployer tool '{}', plugin supports "
"openstack_type as devstack/podified.".format( "openstack_type as devstack/podified.".format(
@ -165,6 +166,16 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
"Not able to find a different compute than: {}".format( "Not able to find a different compute than: {}".format(
exclude_hosts)) exclude_hosts))
@classmethod
def get_full_name(cls, hostname):
    """Return the fully qualified hypervisor name matching *hostname*.

    Looks up all hypervisor hostnames known to nova and returns the
    first one that contains *hostname* as a substring (e.g. maps a
    short node name to its FQDN). If no hypervisor name matches,
    *hostname* is returned unchanged.
    """
    hypervisors = cls.os_admin.hv_client.list_hypervisors()['hypervisors']
    # First substring match wins, mirroring the hypervisor list order.
    return next(
        (hv['hypervisor_hostname'] for hv in hypervisors
         if hostname in hv['hypervisor_hostname']),
        hostname)
@staticmethod @staticmethod
def _get_local_ip_from_network(network): def _get_local_ip_from_network(network):
host_ip_addresses = [ifaddresses(iface)[AF_INET][0]['addr'] host_ip_addresses = [ifaddresses(iface)[AF_INET][0]['addr']
@ -268,7 +279,8 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
cls.nodes.append( cls.nodes.append(
{'ip': host, 'client': cls.get_node_client(host)}) {'ip': host, 'client': cls.get_node_client(host)})
for host in cls.nodes: for host in cls.nodes:
host['name'] = host['client'].exec_command('hostname').strip() host['name'] = cls.get_full_name(
host['client'].exec_command('hostname').strip())
# Here we are checking if there are controller-specific # Here we are checking if there are controller-specific
# processes running on the node # processes running on the node
output = host['client'].exec_command( output = host['client'].exec_command(
@ -292,7 +304,7 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
@classmethod @classmethod
def get_pod_of_service(cls, service='neutron'): def get_pod_of_service(cls, service='neutron'):
pods_list = "oc get pods" pods_list = "{} get pods".format(cls.OC)
if service == 'neutron': if service == 'neutron':
filters = "grep neutron | grep -v meta | cut -d' ' -f1" filters = "grep neutron | grep -v meta | cut -d' ' -f1"
else: else:
@ -307,8 +319,8 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
if service == 'neutron': if service == 'neutron':
pod = cls.get_pod_of_service(service) pod = cls.get_pod_of_service(service)
return cls.proxy_host_client.exec_command( return cls.proxy_host_client.exec_command(
'oc rsh -n openstack {} find {} -type f'.format( '{} rsh {} find {} -type f'.format(
pod, os.path.split( cls.OC, pod, os.path.split(
WB_CONF.neutron_config)[0])).strip().split('\n') WB_CONF.neutron_config)[0])).strip().split('\n')
# TODO(mblue): next gen computes configuration set should be done too, # TODO(mblue): next gen computes configuration set should be done too,
@ -341,8 +353,8 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
[{}] [{}]
{} = {}'''.format( {} = {}'''.format(
service, section, param, value) service, section, param, value)
cmd = "oc patch $(oc get oscp -o name) --type=merge --patch '" + \ cmd = ("{0} patch $({0} get oscp -o name) --type=merge "
patch_buffer + "'" "--patch '".format(cls.OC) + patch_buffer + "'")
LOG.debug("Set configuration command:\n%s", cmd) LOG.debug("Set configuration command:\n%s", cmd)
output = cls.proxy_host_client.exec_command(cmd) output = cls.proxy_host_client.exec_command(cmd)
LOG.debug("Output:\n%s", output) LOG.debug("Output:\n%s", output)
@ -375,8 +387,8 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
""" """
if WB_CONF.openstack_type == 'podified': if WB_CONF.openstack_type == 'podified':
service_prefix = "oc rsh -n openstack {}".format( service_prefix = "{} rsh {}".format(
cls.get_pod_of_service(service)) cls.OC, cls.get_pod_of_service(service))
else: else:
service_prefix = "" service_prefix = ""
cmd_prefix = "crudini --get" cmd_prefix = "crudini --get"
@ -640,7 +652,7 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
def get_osp_cmd_prefix(cls, admin=True): def get_osp_cmd_prefix(cls, admin=True):
# TODO(mblue): figure how admin used in podified setup when needed # TODO(mblue): figure how admin used in podified setup when needed
if WB_CONF.openstack_type == 'podified': if WB_CONF.openstack_type == 'podified':
prefix = 'oc rsh -n openstack openstackclient ' prefix = '{} rsh openstackclient '.format(cls.OC)
elif WB_CONF.openstack_type == 'devstack': elif WB_CONF.openstack_type == 'devstack':
prefix = '. /opt/stack/devstack/openrc{} && '.format( prefix = '. /opt/stack/devstack/openrc{} && '.format(
' admin' if admin else '') ' admin' if admin else '')
@ -1139,8 +1151,9 @@ class BaseTempestTestCaseOvn(BaseTempestWhiteboxTestCase):
def _get_ovn_dbs(cls): def _get_ovn_dbs(cls):
if WB_CONF.openstack_type == 'podified': if WB_CONF.openstack_type == 'podified':
sb_pod = cls.proxy_host_client.exec_command( sb_pod = cls.proxy_host_client.exec_command(
"oc get pods | grep ovsdbserver-sb | cut -f1 -d' '").strip() "{} get pods | grep ovsdbserver-sb | "
sb_prefix = 'oc rsh -n openstack {}'.format(sb_pod) "cut -f1 -d' '".format(cls.OC)).strip()
sb_prefix = '{} rsh {}'.format(cls.OC, sb_pod)
nb_prefix = sb_prefix.replace('sb', 'nb') nb_prefix = sb_prefix.replace('sb', 'nb')
cmd = "{} ovn-{}ctl" cmd = "{} ovn-{}ctl"
return [cmd.format(nb_prefix, 'nb'), cmd.format(sb_prefix, 'sb')] return [cmd.format(nb_prefix, 'nb'), cmd.format(sb_prefix, 'sb')]

View File

@ -279,7 +279,7 @@ class OvnDvrTest(OvnDvrBase):
self._setup() self._setup()
server2 = self._create_server(exclude_hosts=self.exclude_hosts) server2 = self._create_server(exclude_hosts=self.exclude_hosts)
compute2 = self.get_host_for_server( compute2 = self.get_host_for_server(
server2['server']['id']).split('.')[0] server2['server']['id'])
LOG.debug("compute = {}, compute2 = {}".format(self.compute, compute2)) LOG.debug("compute = {}, compute2 = {}".format(self.compute, compute2))
if self.compute == compute2: if self.compute == compute2:
self.skipTest( self.skipTest(

View File

@ -107,20 +107,20 @@ class ProviderNetworkSriovBaseTest(base.ProviderBaseTest):
# or more than one cell will be supported in the future # or more than one cell will be supported in the future
nova_scheduler_pod = cls.get_pod_of_service("nova-scheduler") nova_scheduler_pod = cls.get_pod_of_service("nova-scheduler")
cells = cls.proxy_host_client.exec_command( cells = cls.proxy_host_client.exec_command(
"oc rsh {} nova-manage cell_v2 list_hosts | grep compute | " "{} rsh {} nova-manage cell_v2 list_hosts | grep compute | "
"tr -d '|' | tr -s ' ' ".format(nova_scheduler_pod) + "| " "tr -d '|' | tr -s ' ' ".format(cls.OC, nova_scheduler_pod) + "| "
"awk '{print $1}' | uniq").strip().split() "awk '{print $1}' | uniq").strip().split()
if len(cells) != 1: if len(cells) != 1:
cls.fail("Currently only environments with a single cell " cls.fail("Currently only environments with a single cell "
"are supported") "are supported")
galera_pod = cls.get_pod_of_service( galera_pod = cls.get_pod_of_service(
'openstack-{}-galera-0'.format(cells[0])) 'openstack-{}-galera-0'.format(cells[0]))
galera_db_exec = "oc rsh {}".format(galera_pod) galera_db_exec = "{} rsh {}".format(cls.OC, galera_pod)
data_filter = ".data.Nova{}DatabasePassword|base64decode".format( data_filter = ".data.Nova{}DatabasePassword|base64decode".format(
cells[0].capitalize()) cells[0].capitalize())
db_password = cls.proxy_host_client.exec_command( db_password = cls.proxy_host_client.exec_command(
"oc get secret osp-secret -o go-template --template=" "{} get secret osp-secret -o go-template --template=".format(
"\"{{" + data_filter + "}}\"").strip() cls.OC) + "\"{{" + data_filter + "}}\"").strip()
db_credentials = "-u root -p{}".format(db_password) db_credentials = "-u root -p{}".format(db_password)
mysql_cmd = ('mysql --skip-column-names {} nova_{} -e ' mysql_cmd = ('mysql --skip-column-names {} nova_{} -e '
'"select pci_stats from compute_nodes;"'.format( '"select pci_stats from compute_nodes;"'.format(