Fix some bugs related to the Kubernetes MgmtDriver
After the X version was released, regression tests were performed on all functions of MgmtDriver. This patch fixes the bugs found. 1. Add the process of getting the 'FloatingIP' to the kubernetes_mgmt.py and private_registry_mgmt.py files, so that users can access the Registry and Storage VM through the floating IP when using the CIR and Storage functions. 2. When the registry is accessed through https, add a `break` statement to the while-loop connection logic in the `_connect_to_private_registries` method in kubernetes_mgmt.py so that it exits the loop on success. 3. Modify the type field in podaffinity_hot_top.yaml to match the file name in the nested directory. 4. When using the pod-affinity function, if multiple 'OS::Nova::ServerGroup' resources are created at the same time, the id of a single resource cannot be matched to the workerNode. Instead of obtaining the id directly as before, first obtain the `physical_resource_id` of all 'OS::Nova::ServerGroup' resources, and then match against the ids in the workerNode information to obtain the target id. 5. Modify the method of obtaining the kubespray VM's password in kubespray_mgmt.py. Closes-Bug: #1949926 Change-Id: I2e966df35f874e14c39792a6882ee913ac424663
This commit is contained in:
@@ -287,6 +287,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
vnf_instance, grant):
|
||||
zone_id = ''
|
||||
host_compute = ''
|
||||
srv_grp_phy_res_id_list = []
|
||||
nest_resources_list = heatclient.resources.list(
|
||||
stack_id=nest_stack_id)
|
||||
for nest_res in nest_resources_list:
|
||||
@@ -297,8 +298,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
srv_grp_policies = pod_affinity_res_info.attributes.get(
|
||||
'policy')
|
||||
if srv_grp_policies and srv_grp_policies == 'anti-affinity':
|
||||
srv_grp_phy_res_id = pod_affinity_res_info.\
|
||||
physical_resource_id
|
||||
srv_grp_phy_res_id_list.append(
|
||||
pod_affinity_res_info.physical_resource_id)
|
||||
lowest_res_list = heatclient.resources.list(stack_id=stack_id)
|
||||
for lowest_res in lowest_res_list:
|
||||
if lowest_res.resource_type == 'OS::Nova::Server':
|
||||
@@ -307,7 +308,9 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
stack_id=stack_id, resource_name=lowest_res_name)
|
||||
srv_groups = worker_node_res_info.attributes.get(
|
||||
'server_groups')
|
||||
if srv_groups and srv_grp_phy_res_id in srv_groups:
|
||||
srv_grp_phy_res_id = set(
|
||||
srv_grp_phy_res_id_list) & set(srv_groups)
|
||||
if srv_grp_phy_res_id:
|
||||
host_compute = worker_node_res_info.attributes.get(
|
||||
'OS-EXT-SRV-ATTR:host')
|
||||
if self.SET_ZONE_ID_FLAG:
|
||||
@@ -648,7 +651,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
self._execute_command(
|
||||
commander, ssh_command,
|
||||
PR_CMD_TIMEOUT, 'common', 0)
|
||||
transport.close()
|
||||
break
|
||||
except paramiko.SSHException as e:
|
||||
LOG.debug(e)
|
||||
retry -= 1
|
||||
@@ -657,6 +660,13 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
commander.close_session()
|
||||
raise paramiko.SSHException()
|
||||
time.sleep(SERVER_WAIT_COMPLETE_TIME)
|
||||
except (exceptions.MgmtDriverOtherError,
|
||||
exceptions.MgmtDriverRemoteCommandError) as ex:
|
||||
LOG.error(ex)
|
||||
commander.close_session()
|
||||
raise ex
|
||||
finally:
|
||||
transport.close()
|
||||
|
||||
# connect to private registries
|
||||
for pr_info in pr_connection_info:
|
||||
@@ -2149,7 +2159,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
storage_server_param = vnf_instance.instantiated_vnf_info \
|
||||
.additional_params.get('k8s_cluster_installation_param')\
|
||||
.get('storage_server', {})
|
||||
target_ss_cp_name = storage_server_param.get('ssh_cp_name', None)
|
||||
target_ss_cp_name = storage_server_param.get('nic_cp_name', None)
|
||||
for vnfc_instance_id in heal_vnf_request.vnfc_instance_id:
|
||||
instantiated_vnf_info = vnf_instance.instantiated_vnf_info
|
||||
vnfc_resource_info = instantiated_vnf_info.vnfc_resource_info
|
||||
@@ -2711,9 +2721,19 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
stack_id=stack_id,
|
||||
resource_name=ssh_cp_name
|
||||
)
|
||||
ssh_ip_address = resource_info.attributes \
|
||||
.get('fixed_ips')[0].get('ip_address')
|
||||
if not ssh_ip_address:
|
||||
ssh_ip_address = resource_info.attributes.get('floating_ip_address')
|
||||
if ssh_ip_address is None and resource_info.attributes.get(
|
||||
'fixed_ips'):
|
||||
ssh_ip_address = resource_info.attributes.get(
|
||||
'fixed_ips')[0].get('ip_address')
|
||||
|
||||
try:
|
||||
ipaddress.ip_address(ssh_ip_address)
|
||||
except ValueError:
|
||||
raise exceptions.MgmtDriverOtherError(
|
||||
error_message="The IP address of "
|
||||
"Storage server VM is invalid.")
|
||||
if ssh_ip_address is None:
|
||||
raise exceptions.MgmtDriverOtherError(
|
||||
error_message="Failed to get IP address for "
|
||||
"Storage server VM")
|
||||
|
||||
@@ -33,7 +33,7 @@ resources:
|
||||
max_size: 5
|
||||
desired_capacity: 3
|
||||
resource:
|
||||
type: master_instance.hot.yaml
|
||||
type: podaffinity_nested_master.yaml
|
||||
properties:
|
||||
flavor: { get_param: [ nfv, VDU, masterNode, flavor ] }
|
||||
image: { get_param: [ nfv, VDU, masterNode, image ] }
|
||||
@@ -64,7 +64,7 @@ resources:
|
||||
max_size: 4
|
||||
desired_capacity: 2
|
||||
resource:
|
||||
type: worker_instance.hot.yaml
|
||||
type: podaffinity_nested_worker.yaml
|
||||
properties:
|
||||
flavor: { get_param: [ nfv, VDU, workerNode, flavor ] }
|
||||
image: { get_param: [ nfv, VDU, workerNode, image ] }
|
||||
|
||||
@@ -373,7 +373,7 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
'ansible_host': master_vm.get('ssh_ip'),
|
||||
'ip': master_vm.get('nic_ip'),
|
||||
'ansible_user': master_node.get('username'),
|
||||
'ansible_password': master_node.get('username'),
|
||||
'ansible_password': master_node.get('password'),
|
||||
}
|
||||
hosts_yaml_content['all']['children']['kube_control_plane'][
|
||||
'hosts'][key] = None
|
||||
@@ -386,7 +386,7 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
'ansible_host': worker_vm.get('ssh_ip'),
|
||||
'ip': worker_vm.get('nic_ip'),
|
||||
'ansible_user': worker_node.get('username'),
|
||||
'ansible_password': worker_node.get('username'),
|
||||
'ansible_password': worker_node.get('password'),
|
||||
}
|
||||
hosts_yaml_content['all']['children']['kube_node'][
|
||||
'hosts'][key] = None
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ipaddress
|
||||
import os
|
||||
import time
|
||||
|
||||
@@ -69,14 +69,19 @@ class PrivateRegistryMgmtDriver(
|
||||
# get IP address from heat
|
||||
resource_info = heatclient.resources.get(
|
||||
stack_id=stack_id, resource_name=cp_name)
|
||||
fixed_ips = resource_info.attributes.get("fixed_ips")
|
||||
if fixed_ips:
|
||||
cp_ip_address = fixed_ips[0].get("ip_address")
|
||||
else:
|
||||
cp_ip_address = ""
|
||||
cp_ip_address = resource_info.attributes.get('floating_ip_address')
|
||||
if cp_ip_address is None and resource_info.attributes.get('fixed_ips'):
|
||||
cp_ip_address = resource_info.attributes.get(
|
||||
'fixed_ips')[0].get("ip_address")
|
||||
|
||||
# check result
|
||||
if not cp_ip_address:
|
||||
try:
|
||||
ipaddress.ip_address(cp_ip_address)
|
||||
except ValueError:
|
||||
err_msg = "The IP address of Private registry VM is invalid."
|
||||
LOG.error(err_msg)
|
||||
raise exceptions.MgmtDriverOtherError(error_message=err_msg)
|
||||
if cp_ip_address is None:
|
||||
err_msg = "Failed to get IP address for Private registry VM"
|
||||
LOG.error(err_msg)
|
||||
raise exceptions.MgmtDriverOtherError(error_message=err_msg)
|
||||
|
||||
Reference in New Issue
Block a user