Support inventory of CRC-based environment
An environment deployed with CRC has a different inventory format. This patch
adds support for retrieving data from it.

It also applies changes to support a mounted ssh key path, see [1], [2], and
adds code to create a symlink to kubeconfig in .kube/config, if it is missing
for some reason.

[1] https://github.com/openstack-k8s-operators/test-operator/pull/69/commits
[2] https://github.com/openstack-k8s-operators/tcib/pull/159/files

Change-Id: I425410e4cbc1ea7e419b619c86345371d7884a61
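For context, a minimal sketch of the inventory layout this change has to handle, following the parsing logic in the diff below. The group structure ('all', 'children', 'ocps', 'hosts') mirrors what the code reads from the ci-framework inventory, while the host names, addresses and users here are illustrative assumptions only, not values from a real deployment. In a CRC-based inventory the group children only list host names and the per-host variables live under all.hosts, so the code detects the 'crc' host under the 'ocps' group and resolves host data through all.hosts; a non-CRC inventory keeps the variables directly under each group's hosts key.

import yaml

# Illustrative CRC-style inventory: group children only name the hosts,
# per-host variables live under all.hosts (names and IPs are made up).
CRC_INVENTORY = """
all:
  hosts:
    crc:
      ansible_host: 192.168.130.11
      ansible_user: core
    compute-0:
      ansible_host: 192.168.122.100
      ansible_user: zuul
  children:
    ocps:
      hosts:
        crc:
    computes:
      hosts:
        compute-0:
"""

inventory_data = yaml.safe_load(CRC_INVENTORY)
groups = inventory_data['all']['children']

# Same detection idea as the patch: a CRC deployment exposes a 'crc' host
# in the 'ocps' group.
ocps = groups['ocps']
is_crc = 'hosts' in ocps and 'crc' in ocps['hosts'].keys()

hosts_data = {}
if is_crc:
    # CRC layout: group members are only names, look variables up in all.hosts
    for group in groups.values():
        for host_name in group['hosts']:
            hosts_data[host_name] = inventory_data['all']['hosts'][host_name]
else:
    # non-CRC layout: variables sit directly under each group's hosts key
    for group in groups.values():
        hosts_data.update(group['hosts'])

print(is_crc, hosts_data['compute-0']['ansible_user'])

The patch itself additionally skips 'controller' entries and, on non-CRC deployments, picks up the ansible_ssh_private_key_file of the ocp nodes, as can be seen in append_node_data in the diff below.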
@@ -139,18 +139,22 @@ WhiteboxNeutronPluginOptions = [
               help='Intermediate host to run commands on podified '
                    'environment'),
    cfg.StrOpt('proxy_host_user',
               default='',
               default='zuul',
               help='User of intermediate host to run commands on podified '
                    'environment'),
    cfg.StrOpt('proxy_host_key_data',
               default='{}',
               help='Key data for accessing intermediate host on podified '
                    'environment, in dict format, i.e. {"key":"key_data"}.'
                    'The key_data should be key without first and last line '
                    'and all new lines replaced by \n'),
    cfg.StrOpt('proxy_host_key_file',
               default='/var/lib/tempest/.ssh/id_ecdsa',
               help='Path to private key for accessing intermediate host on '
                    'podified environment'),
    cfg.StrOpt('proxy_host_inventory_path',
               default='',
               default='/home/zuul/ci-framework-data/artifacts/'
                       'zuul_inventory.yml',
               help='Nodes inventory on proxy host on podified environment'),
    cfg.StrOpt('kubeconfig_path',
               default='',
               help='A non-standard path to kubeconfig, if exist. If set, '
                    'a symlink in ~/.kube/config will be created to allow '
                    'running openshift client commands with less parameters'),
    cfg.IntOpt('servers_count',
               default=12,
               help='How many tenant VMs should be tested when many needed '
@@ -1,4 +1,4 @@
# Copyright 2019 Red Hat, Inc.
# Copyright 2024 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import base64
import json
import os
import random
import re
@@ -69,11 +68,14 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
            cls.neutron_api_prefix = ''
            cls.neutron_conf = WB_CONF.neutron_config
        elif WB_CONF.openstack_type == 'podified':
            cls.proxy_host_key = cls._get_podified_proxy_host_key()
            cls.proxy_host_client = cls.get_node_client(
                host=WB_CONF.proxy_host_address,
                username=WB_CONF.proxy_host_user,
                pkey=f"{cls.proxy_host_key}")
                key_filename=WB_CONF.proxy_host_key_file)
            if WB_CONF.kubeconfig_path:
                cls.proxy_host_client.exec_command(
                    "ln -s {} /home/{}/.kube/config || true".format(
                        WB_CONF.kubeconfig_path, WB_CONF.proxy_host_user))
            cls.master_node_client = cls.proxy_host_client
            cls.master_cont_cmd_executor = cls.proxy_host_client
        else:
@@ -162,23 +164,23 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
    @classmethod
    def get_podified_nodes_data(cls):

        def append_node_data(node, node_group_data):
            if 'ocp' in node:
                node_name = node.replace("ocp", "master")
        def append_node_data(node, is_crc):
            if 'controller' in node:
                return
            if 'ocp' in node and not is_crc:
                # a specific ssh key is used for accessing ocp nodes
                key = 'ansible_ssh_private_key_file'  # meaning dict key here
                # save path of ocp nodes key (if not yet), we'll need it later
                if not hasattr(cls, 'ocp_nodes_key_path'):
                    cls.ocp_nodes_key_path = (
                        node_group_data[node][key].replace(
                        hosts_data[node][key].replace(
                            '~', '/home/{}'.format(WB_CONF.proxy_host_user)))
                node_key = node_group_data[node][key].split('/')[-1]
                node_key = hosts_data[node][key].split('/')[-1]
            else:
                node_name = node
                node_key = 'id_cifw_key'
            node_data = {
                'name': node_name,
                'ip': node_group_data[node]['ansible_host'],
                'user': node_group_data[node]['ansible_user'],
                'ip': hosts_data[node]['ansible_host'],
                'user': hosts_data[node]['ansible_user'],
                'key': node_key}
            nodes.append(node_data)
@@ -186,64 +188,30 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
        inventory_data = yaml.safe_load(
            cls.proxy_host_client.exec_command(
                'cat ' + WB_CONF.proxy_host_inventory_path))
        computes_data = inventory_data['computes']['hosts']
        for node in computes_data:
            append_node_data(node, computes_data)
        ocps_data = inventory_data['ocps']['hosts']
        for node in ocps_data:
            append_node_data(node, ocps_data)
        is_crc = False
        ocps = inventory_data['all']['children']['ocps']
        if 'hosts' in ocps and 'crc' in ocps['hosts'].keys():
            is_crc = True
        LOG.debug("Environment is{} based on CRC".format(
            "" if is_crc else "n't"))
        items = inventory_data['all']['children']
        hosts_data = {}
        host_names = []
        for host_type in items.keys():
            if is_crc:
                host_names.extend(
                    list(items[host_type]['hosts'].keys()))
                for host_name in host_names:
                    hosts_data[host_name] = (
                        inventory_data['all']['hosts'][host_name])
            else:
                hosts_data.update(inventory_data[host_type]['hosts'])
        for host in hosts_data:
            append_node_data(host, is_crc)
        return nodes

    @classmethod
    def _get_podified_proxy_host_key(cls):
        start = '-----BEGIN OPENSSH PRIVATE KEY-----\n'
        end = '-----END OPENSSH PRIVATE KEY-----\n'
        key = json.loads(WB_CONF.proxy_host_key_data)['key']
        return '{}{}{}'.format(start, key, end)

    @classmethod
    def append_node(cls, host, is_compute=False, is_networker=False):
        hostname = host.split('.')[0]
        for node in cls.nodes:
            if node['name'] == hostname:
                if not node['is_networker']:
                    node['is_networker'] = is_networker
                if not node['is_compute']:
                    node['is_compute'] = is_compute
                return
        if WB_CONF.openstack_type == 'podified':
            for node in cls.nodes_data:
                LOG.debug(
                    "hostname='{}', name='{}'".format(hostname, node['name']))
                if node['name'] == hostname:
                    extra_params = {
                        'client': cls.get_node_client(
                            host=node['ip'], username=node['user'],
                            pkey=f"{cls.keys_data[node['key']]}")}
                    break
        else:
            extra_params = {'client': cls.get_node_client(host)}
        params = {'name': hostname,
                  'is_networker': is_networker,
                  'is_controller': False,
                  'is_compute': is_compute}
        node = {**params, **extra_params}
        # Here we are checking if there are controller-specific
        # processes running on the node
        output = node['client'].exec_command(
            r"ps ax | grep 'rabbit\|galera' | grep -v grep || true")
        if output.strip() != "":
            node['is_controller'] = True
        cls.nodes.append(node)

    @classmethod
    def discover_nodes(cls):
        if WB_CONF.openstack_type == 'podified':
            cls.nodes_data = cls.get_podified_nodes_data()
            cls.keys_data = {
                'id_cifw_key': cls.proxy_host_key,
                'devscripts_key': cls.proxy_host_client.exec_command(
                    'cat ' + cls.ocp_nodes_key_path)}
        agents = cls.os_admin.network.AgentsClient().list_agents()['agents']
        if cls.has_ovn_support:
            l3_agent_hosts = [
@@ -256,11 +224,37 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
        compute_hosts = [
            host['hypervisor_hostname'] for host
            in cls.os_admin.hv_client.list_hypervisors()['hypervisors']]
        cls.nodes = []
        for host in compute_hosts:
            cls.append_node(host, is_compute=True)
        for host in l3_agent_hosts:
            cls.append_node(host, is_networker=True)
        if WB_CONF.openstack_type == 'podified':
            cls.nodes = cls.get_podified_nodes_data()
            with open(WB_CONF.proxy_host_key_file, 'r') as file:
                id_cifw_key = file.read()
            cls.keys_data = {
                'id_cifw_key': id_cifw_key}
            if hasattr(cls, 'ocp_nodes_key_path'):
                devscripts_key = cls.proxy_host_client.exec_command(
                    'cat ' + cls.ocp_nodes_key_path)
                cls.keys_data['devscripts_key'] = devscripts_key
            for host in cls.nodes:
                client = cls.get_node_client(
                    host=host['ip'], username=host['user'],
                    pkey=f"{cls.keys_data[host['key']]}")
                host['client'] = client
        else:
            cls.nodes = []
            for host in set([*l3_agent_hosts, *compute_hosts]):
                cls.nodes.append(
                    {'ip': host, 'client': cls.get_node_client(host)})
        for host in cls.nodes:
            host['name'] = host['client'].exec_command('hostname').strip()
            # Here we are checking if there are controller-specific
            # processes running on the node
            output = host['client'].exec_command(
                r"ps ax | grep 'rabbit\|galera' | grep -v grep || true")
            host['is_controller'] = True if output.strip() != "" else False
            host['is_compute'] = (
                True if host['name'] in compute_hosts else False)
            host['is_networker'] = (
                True if host['name'] in l3_agent_hosts else False)

    @classmethod
    def get_pod_of_service(cls, service='neutron'):