Merge "Adjust partition sysinv data from template"
commit b42950fd87
@@ -5,12 +5,11 @@
 #
 # This is a data migration script to pick up the partition changes during
 # upgrade to Debian OS.
-# The migration refreshes the i_idisk, partition, i_pv and i_lvg tables
+# The migration refreshes the partition, i_pv and i_lvg tables
 # with the new partition configuration on the filesystem of Debian StarlingX
 # after controller-1 is upgraded.
 #

 import json
 import copy
 import sys
 import psycopg2
 from controllerconfig.common import log
@@ -21,10 +20,7 @@ import uuid

 from sysinv.common import constants
 from sysinv.common import utils as cutils
-from sysinv.agent import disk as Disk
-from sysinv.agent import lvg as Lvg
 from sysinv.agent import partition as Partition
-from sysinv.agent import pv as Pv

 LOG = log.get_logger(__name__)

@@ -62,28 +58,53 @@ def main():
     return res


-IDISK_COLUMNS = 'created_at', 'updated_at', 'deleted_at', 'uuid', \
-    'device_node', 'device_num', 'device_type', 'size_mib', \
-    'serial_id', 'capabilities', 'forihostid', 'foristorid', \
-    'foripvid', 'rpm', 'device_id', 'device_path', 'device_wwn', \
-    'available_mib'
-
 IPARTITION_COLUMNS = 'created_at', 'updated_at', 'deleted_at', 'uuid', \
     'start_mib', 'end_mib', 'size_mib', 'device_path', \
     'type_guid', 'type_name', 'idisk_id', 'idisk_uuid', \
     'capabilities', 'status', 'foripvid', 'forihostid', \
     'device_node'

-LVG_COLUMNS = 'created_at', 'updated_at', 'deleted_at', 'uuid', 'vg_state', \
-    'lvm_vg_name', 'lvm_vg_uuid', 'lvm_vg_access', 'lvm_max_lv', \
-    'lvm_cur_lv', 'lvm_max_pv', 'lvm_cur_pv', 'lvm_vg_size', \
-    'lvm_vg_total_pe', 'lvm_vg_free_pe', 'capabilities', 'forihostid'
+# worker node partition template
+WORKER_PARTITION_LIST = [
+    {'start_mib': '1', 'end_mib': '2', 'size_mib': '1',
+     'type_guid': '21686148-6449-6e6f-744e-656564454649',
+     'type_name': 'BIOS boot partition'},
+    {'start_mib': '2', 'end_mib': '302', 'size_mib': '300',
+     'type_guid': 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b',
+     'type_name': 'EFI system partition'},
+    {'start_mib': '302', 'end_mib': '802', 'size_mib': '500',
+     'type_guid': '0fc63daf-8483-4772-8e79-3d69d8477de4',
+     'type_name': 'Linux filesystem'},
+    {'start_mib': '802', 'end_mib': '21282', 'size_mib': '20480',
+     'type_guid': '0fc63daf-8483-4772-8e79-3d69d8477de4',
+     'type_name': 'Linux filesystem'},
+    {'start_mib': '21282', 'end_mib': '41762', 'size_mib': '20480',
+     'type_guid': '0fc63daf-8483-4772-8e79-3d69d8477de4',
+     'type_name': 'Linux filesystem'},
+    {'start_mib': '41762', 'end_mib': '112418', 'size_mib': '70656',
+     'type_guid': 'e6d6d379-f507-44c2-a23c-238f2a3df928',
+     'type_name': 'Linux LVM'}]

-IPV_COLUMNS = 'created_at', 'updated_at', 'deleted_at', 'uuid', 'pv_state', \
-    'pv_type', 'disk_or_part_uuid', 'disk_or_part_device_node', \
-    'lvm_pv_name', 'lvm_vg_name', 'lvm_pv_uuid', 'lvm_pv_size', \
-    'lvm_pe_total', 'lvm_pe_alloced', 'capabilities', 'forihostid', \
-    'forilvgid', 'disk_or_part_device_path'
+# storage node partition template
+STORAGE_PARTITION_LIST = [
+    {'start_mib': '1', 'end_mib': '2', 'size_mib': '1',
+     'type_guid': '21686148-6449-6e6f-744e-656564454649',
+     'type_name': 'BIOS boot partition'},
+    {'start_mib': '2', 'end_mib': '302', 'size_mib': '300',
+     'type_guid': 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b',
+     'type_name': 'EFI system partition'},
+    {'start_mib': '302', 'end_mib': '802', 'size_mib': '500',
+     'type_guid': '0fc63daf-8483-4772-8e79-3d69d8477de4',
+     'type_name': 'Linux filesystem'},
+    {'start_mib': '802', 'end_mib': '21282', 'size_mib': '20480',
+     'type_guid': '0fc63daf-8483-4772-8e79-3d69d8477de4',
+     'type_name': 'Linux filesystem'},
+    {'start_mib': '21282', 'end_mib': '41762', 'size_mib': '20480',
+     'type_guid': '0fc63daf-8483-4772-8e79-3d69d8477de4',
+     'type_name': 'Linux filesystem'},
+    {'start_mib': '41762', 'end_mib': '381555', 'size_mib': '339793',
+     'type_guid': 'e6d6d379-f507-44c2-a23c-238f2a3df928',
+     'type_name': 'Linux LVM'}]


 def get_disk_uuid_mapping(conn, forihostid):
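The two templates are internally consistent: each entry's end_mib minus start_mib equals its size_mib, and every partition starts exactly where the previous one ends. A quick standalone check, runnable against the lists above (illustrative only, not part of the commit):

def check_template(template):
    prev_end = None
    for part in template:
        start = int(part['start_mib'])
        end = int(part['end_mib'])
        size = int(part['size_mib'])
        assert end - start == size, part                      # sizes add up
        assert prev_end is None or start == prev_end, part    # contiguous layout
        prev_end = end

check_template(WORKER_PARTITION_LIST)   # defaults end at 112418 MiB
check_template(STORAGE_PARTITION_LIST)  # same layout, larger LVM tail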
@@ -99,52 +120,17 @@ def get_disk_uuid_mapping(conn, forihostid):
     return mappings


-def get_idisks(forihostid, uuid_mapping):
-    do = Disk.DiskOperator()
-    disks = do.idisk_get()
-    idisks = []
-    now = datetime.now()
-    disk_additions = {
-        "created_at": now,
-        "updated_at": now,
-        "deleted_at": None,
-        "forihostid": forihostid,
-        # foripvid will be populated when updating i_pv table
-        "foripvid": None,
-        # ToDo missing foristorid
-        "foristorid": None,
-    }
-
-    for disk in disks:
-        # find uuid
-        device_node = disk["device_node"]
-        if device_node in uuid_mapping:
-            disk_additions["uuid"] = uuid_mapping[device_node]
-        else:
-            # this is not good, but it could be a new disk
-            LOG.warn("Found disk %s that is not inventoried" % device_node)
-            new_uuid = "%s" % uuid.uuid4()
-            disk_additions["uuid"] = new_uuid
-            LOG.info("Assign uuid %s to %s" % (new_uuid, device_node))
-        disk.update(disk_additions)
-
-        idisk = []
-        # the IDISK_COLUMNS is the order of insert statement,
-        # sort the values in the same order below to form
-        # SQL insert statement
-        for col in IDISK_COLUMNS:
-            if col == "capabilities":
-                if disk[col] is None:
-                    idisk.append(None)
-                else:
-                    idisk.append(json.dumps(disk[col]))
-            else:
-                idisk.append(disk[col])
-        idisks.append(idisk)
-    return idisks
+def get_idisks(conn, forihostid):
+    # do not consider disk change (replace, remove, or add new disk)
+    # during upgrade
+    sql = "SELECT * FROM i_idisk WHERE forihostid = %s;"
+    with conn.cursor(cursor_factory=DictCursor) as cur:
+        cur.execute(sql, (forihostid, ))
+        idisks = cur.fetchall()
+    return idisks


-def get_ihost(conn):
+def get_cur_host(conn):
     with conn.cursor(cursor_factory=DictCursor) as cur:
         cur.execute("SELECT system_mode FROM i_system;")
         system = cur.fetchone()
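The rewritten get_idisks defers entirely to the inventory already recorded in the previous release's database, and relies on psycopg2's DictCursor, whose rows are addressable by column name. A minimal usage sketch (DSN and host id are placeholders):

import psycopg2
from psycopg2.extras import DictCursor

conn = psycopg2.connect("dbname=sysinv user=postgres")
with conn.cursor(cursor_factory=DictCursor) as cur:
    cur.execute("SELECT * FROM i_idisk WHERE forihostid = %s;", (1,))
    for row in cur.fetchall():
        # DictRow supports both positional and key access
        print(row["device_node"], row["device_path"])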
@@ -161,67 +147,6 @@ def get_ihost(conn):
     return instance


-def update_disks(conn, idisks):
-    new_disks = []
-    forihostid = None
-    idx = IDISK_COLUMNS.index("forihostid")
-    dp_idx = IDISK_COLUMNS.index("device_path")
-    uuid_idx = IDISK_COLUMNS.index("uuid")
-    with conn.cursor(cursor_factory=DictCursor) as cur:
-        for idisk in idisks:
-            device_path = idisk[dp_idx]
-            if forihostid is None:
-                forihostid = idisk[idx]
-            elif forihostid != idisk[idx]:
-                raise Exception("Bug: can only handle disks for 1 host at"
-                                "a time")
-
-            setters = ", ".join(["%s=%%s"] * len(IDISK_COLUMNS))
-            sql_tmp = "UPDATE i_idisk SET %s where forihostid=%s and " \
-                      "device_path=%s" % (setters, "%%s", "%%s")
-            sql = sql_tmp % IDISK_COLUMNS
-
-            params = tuple(idisk) + (forihostid, device_path)
-            cur.execute(sql, params)
-            if cur.rowcount == 0:
-                LOG.debug("new disk %s" % device_path)
-                new_disks.append(idisk)
-            else:
-                LOG.debug("update %s" % device_path)
-
-        for idisk in new_disks:
-            # TODO: untest path
-            device_path = idisk[dp_idx]
-            col_fmt = ", ".join(["%s"] * len(IDISK_COLUMNS))
-            values_fmt = ", ".join(["%%s"] * len(IDISK_COLUMNS))
-            sql_fmt = "INSERT INTO i_idisk (%s) VALUES(%s)" % \
-                      (col_fmt, values_fmt)
-            sql = sql_fmt % IDISK_COLUMNS
-            cur.execute(sql, idisk)
-            if cur.rowcount == 1:
-                LOG.info("Add new disk %s, %s" %
-                         (device_path, idisk[uuid_idx]))
-
-        device_paths = [d[dp_idx] for d in idisks]
-        # delete the disks that no longer exist
-        in_list = ', '.join(['%s'] * len(device_paths))
-        sql = "DELETE FROM i_idisk where forihostid = %s and " \
-              "not device_path in (%s)" % (forihostid, in_list)
-        device_paths = tuple(device_paths)
-        cur.execute(sql, device_paths)
-        count = cur.rowcount
-        if count > 0:
-            LOG.info("%s disks no longer exist" % count)
-
-        sql = "SELECT id, uuid, device_node, device_path FROM i_idisk " \
-              "WHERE forihostid = %s"
-        cur.execute(sql, (forihostid, ))
-        disks = [{"id": d[0], "uuid": d[1], "device_node": d[2],
-                  "device_path": d[3], "type": "disk"}
-                 for d in cur.fetchall()]
-        return disks
-
-
 def get_disk_by_device_node(disks, device_path):
     for disk in disks:
         if disk["device_path"] in device_path:
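get_disk_by_device_node (kept above) resolves a partition to its parent disk by substring-matching device paths: a partition's by-path name is the disk's by-path name plus a "-partN" suffix. A small illustration with hypothetical paths:

disks = [{"device_node": "/dev/sda",
          "device_path": "/dev/disk/by-path/pci-0000:00:1f.2-ata-1.0"}]
part_path = "/dev/disk/by-path/pci-0000:00:1f.2-ata-1.0-part3"

parent = next(d for d in disks if d["device_path"] in part_path)
assert parent["device_node"] == "/dev/sda"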
@@ -250,30 +175,26 @@ def get_rootdisk_partitions(conn, forihostid):
     partitions = []
     with conn.cursor(cursor_factory=DictCursor) as cur:
         cur.execute(sql, (forihostid, forihostid))
-        for rec in cur.fetchall():
-            partition = []
-            for idx in range(len(rec)):
-                partition.append(rec[idx])
-            partitions.append(partition)
-
+        partitions = cur.fetchall()
     return partitions


-def get_ipartitions(forihostid, disks):
-    # return ipartitions list sorted by physical order (start_mib)
+def get_controller_partition_template(rootdisk):
+    # return list of partitions created on rootdisk, sorted by physical
+    # order (start_mib)
+
+    root_device_path = rootdisk["device_path"]
+    root_device_node = rootdisk["device_node"]
     po = Partition.PartitionOperator()
     partitions = po.ipartition_get(skip_gpt_check=True)
+
+    # sort by start_mib
+    partitions = sorted(partitions, key=operator.itemgetter('start_mib'))
     now = datetime.now()
-    ipartitions = []

     partition_additions = {
         "created_at": now,
         "updated_at": now,
         "deleted_at": None,
-        "forihostid": forihostid,
         # foripvid will be populated when updating i_pv table
         "foripvid": None,
         # TODO: check to load capabilities
@@ -282,22 +203,64 @@ def get_ipartitions(forihostid, disks):
         "status": 1
     }

+    bootdisk_partitions = []
     for partition in partitions:
         # regenerate uuid
         partition_additions["uuid"] = "%s" % uuid.uuid4()
         partition.update(partition_additions)
+        part_device_path = partition["device_path"]
+        if is_device_path_on_disk(part_device_path, root_device_path,
+                                  root_device_node):
+            partition["device_path"] = None
+            partition["device_node"] = None
+            bootdisk_partitions.append(partition)
+    return sorted(bootdisk_partitions, key=operator.itemgetter('start_mib'))
+
+
+def get_node_partition_template(part_list):
+    # create a partition template from a list of partitions
+    template = copy.deepcopy(part_list)
+
+    now = datetime.now()
+    partition_additions = {
+        "created_at": now,
+        "updated_at": now,
+        "deleted_at": None,
+        # foripvid will be populated when updating i_pv table
+        "foripvid": None,
+        # TODO: check to load capabilities
+        "capabilities": None,
+        # These are the partitions that have already been created
+        "status": 1,
+        "device_path": None,
+        "device_node": None
+    }
+
+    for partition in template:
+        partition.update(partition_additions)

-        disk = get_disk_by_device_node(disks, partition["device_path"])
-        partition["idisk_id"] = disk["id"]
-        partition["idisk_uuid"] = disk["uuid"]
+    return template

-        ipartition = []
-        # the IPARTITION_COLUMNS is the order of insert statement,
-        # sort the values in the same order below to form
-        # SQL statements
-        ipartition = [partition[col] for col in IPARTITION_COLUMNS]
-        ipartitions.append(ipartition)
-    return ipartitions

+def get_ipartitions(forihostid, template, rootdisk):
+    # localize default partitions on rootdisk
+    partitions = copy.deepcopy(template)
+
+    rootdisk_device_node = rootdisk["device_node"]
+    rootdisk_device_path = rootdisk["device_path"]
+    idx = 1
+    for partition in partitions:
+        # regenerate uuid
+        partition["uuid"] = "%s" % uuid.uuid4()
+        partition["idisk_id"] = rootdisk["id"]
+        partition["idisk_uuid"] = rootdisk["uuid"]
+        partition["forihostid"] = forihostid
+        device_node, device_path = \
+            build_device_node_path(rootdisk_device_node, rootdisk_device_path,
                                    idx)
+        partition["device_node"] = device_node
+        partition["device_path"] = device_path
+        idx += 1
+
+    return partitions


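Both template builders order partitions with operator.itemgetter('start_mib'). The sort is only physical when start_mib compares numerically; the templates store the values as strings, so any consumer that re-sorts them should convert first. A minimal demonstration of the sort itself:

import operator

parts = [{'device_node': '/dev/sda3', 'start_mib': 302},
         {'device_node': '/dev/sda1', 'start_mib': 1},
         {'device_node': '/dev/sda2', 'start_mib': 2}]
ordered = sorted(parts, key=operator.itemgetter('start_mib'))
assert [p['device_node'] for p in ordered] == \
    ['/dev/sda1', '/dev/sda2', '/dev/sda3']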
 def build_device_node_path(disk_device_node, disk_device_path, device_idx):
@@ -323,91 +286,88 @@ def is_device_path_on_disk(device_path, disk_device_path, disk_device_node):
     return False


-def append_additional_partitions(conn, ipartitions_all, forihostid, rootdisk):
+def append_additional_partitions(conn, new_rootdisk_partitions,
+                                 host, rootdisk):
     # append user created partitions on rootdisk from the N release
+    # new_rootdisk_partitions is new default partitions on root disk
+    # will append additional user partitions on root disk to the list
+    # to form the entire partition list on root disk
+
+    forihostid = host["id"]
+    personality = host["personality"]
     # get partitions on rootdisk from N db
     rootdisk_partitions = get_rootdisk_partitions(conn, forihostid)

-    start_mib_idx = IPARTITION_COLUMNS.index("start_mib")
-    end_mib_idx = IPARTITION_COLUMNS.index("end_mib")
-    device_node_idx = IPARTITION_COLUMNS.index("device_node")
-    device_path_idx = IPARTITION_COLUMNS.index("device_path")
-    status_idx = IPARTITION_COLUMNS.index('status')
-    foripvid_idx = IPARTITION_COLUMNS.index('foripvid')
+    rootdisk_device_node = rootdisk["device_node"]
+    rootdisk_device_path = rootdisk["device_path"]

-    disk_device_node_idx = IDISK_COLUMNS.index("device_node")
-    disk_device_path_idx = IDISK_COLUMNS.index("device_path")
-
-    rootdisk_device_node = rootdisk[disk_device_node_idx]
-    rootdisk_device_path = rootdisk[disk_device_path_idx]
-
-    ipartitions_rootdisk = []
-    LOG.debug("ipartitions_all %s" % ipartitions_all)
-    for ipartition in ipartitions_all:
-        part_device_path = ipartition[device_path_idx]
-        if is_device_path_on_disk(part_device_path, rootdisk_device_path,
-                                  rootdisk_device_node):
-            ipartitions_rootdisk.append(ipartition)
-    LOG.debug("ipartitions on root disk %s \n%s" %
-              (rootdisk_device_path, ipartitions_rootdisk))
+    LOG.info("Previous release ipartitions on root disk %s \n%s" %
+             (rootdisk_device_path, rootdisk_partitions))

     # get the end mib for the last default partition from release N+1
-    new_end_mib = ipartitions_all[-1][end_mib_idx]
+    new_end_mib = new_rootdisk_partitions[-1]["end_mib"]

-    end_mib_default_cgts_vg = None
+    end_mib_default_partition = None
     foripvid = None

-    # old and new device_path mapping.
-    # device_path_mapping = {}
+    # find the last default partition in ordered list. All default
+    # partitions will be replaced with new default partitions.
+    last_default_partition_idx = -1
+    for idx in range(0, len(rootdisk_partitions)):
+        partition = rootdisk_partitions[idx]
+        if partition["lvm_vg_name"] == "cgts-vg":
+            # found the 1st cgts-vg.
+            # In pre Debian load, it is the last default partition on
+            # controller and storage nodes. It is the 2nd last default
+            # partition on worker nodes.
+            # TODO: bqian: in Debian load (as N release), the first cgts-vg
+            # partition is the last default partition for all node types
+            if personality == "controller":
+                last_default_partition_idx = idx
+            elif personality == "worker":
+                last_default_partition_idx = idx + 1
+            elif personality == "storage":
+                last_default_partition_idx = idx

-    for ipartition in rootdisk_partitions:
-        if end_mib_default_cgts_vg is None:
-            # the cgts-vg 1st pv is the end of default partition. Partitions
-            # created before it will be replaced with new default partitions
-            # in N+1 release. The N+1 default partitions and partitions on
-            # the other disk (non-root disk) are retrieved from
-            # get_ipartitions.
+            foripvid = partition["foripvid"]
+            new_rootdisk_partitions[-1]["foripvid"] = foripvid
+            break

-            # in get_rootdisk_partitions, lvm_vg_name column is appended
-            # to the end of all partition column.
-            LOG.info("DEBUG: partition %s is for lvm_vg %s" %
-                     (ipartition[device_node_idx], ipartition[-1]))
-            if ipartition[-1] == "cgts-vg":
-                # this is the end mib for the last default partition
-                # from release N
-                end_mib_default_cgts_vg = ipartition[end_mib_idx]
-                mib_offset = int(new_end_mib) - int(end_mib_default_cgts_vg)
+    if last_default_partition_idx < 0:
+        # something we don't understand
+        raise Exception("Cannot determine the partition layout in N release")

-                # the last partition kickstart creates is the first cgts-vg pv.
-                foripvid = ipartition[foripvid_idx]
-                ipartitions_all[-1][foripvid_idx] = foripvid
-        else:
-            device_node, device_path = \
-                build_device_node_path(rootdisk_device_node,
-                                       rootdisk_device_path,
-                                       len(ipartitions_rootdisk) + 1)
+    last_default_partition = rootdisk_partitions[last_default_partition_idx]
+    end_mib_default_partition = last_default_partition["end_mib"]
+    mib_offset = int(new_end_mib) - int(end_mib_default_partition)

-            # device_path_mapping[ipartition[device_path_idx] = device_path
-            ipartition[device_node_idx] = device_node
-            ipartition[device_path_idx] = device_path
-            ipartition[start_mib_idx] = \
-                int(ipartition[start_mib_idx]) + mib_offset
-            ipartition[end_mib_idx] = int(ipartition[end_mib_idx]) + mib_offset
-            ipartition[status_idx] = \
-                constants.PARTITION_CREATE_ON_UNLOCK_STATUS
-            # copy partition data into ipartitions_rootdisk array, i.e, remove
-            # ending lvm_vg_name column
-            ipartitions_rootdisk.append(ipartition[0: len(IPARTITION_COLUMNS)])
-            LOG.info("DEBUG: recreating partition %s" % ipartition)
-    LOG.info("DEBUG: new list of partitions %s" % ipartitions_rootdisk)
-    return ipartitions_rootdisk
+    next_partition_idx = last_default_partition_idx + 1
+    for idx in range(next_partition_idx, len(rootdisk_partitions)):
+        partition = rootdisk_partitions[idx]
+        device_node, device_path = \
+            build_device_node_path(rootdisk_device_node,
+                                   rootdisk_device_path,
+                                   len(new_rootdisk_partitions) + 1)

+        partition["device_node"] = device_node
+        partition["device_path"] = device_path
+        partition["start_mib"] = int(partition["start_mib"]) + mib_offset
+        partition["end_mib"] = int(partition["end_mib"]) + mib_offset
+        partition["status"] = constants.PARTITION_CREATE_ON_UNLOCK_STATUS
+        new_rootdisk_partitions.append(partition)
+        LOG.info("To recreate partition %s" % partition)

+    ipartitions = []
+    for partition in new_rootdisk_partitions:
+        ipartition = [partition[key] for key in IPARTITION_COLUMNS]
+        ipartitions.append(ipartition)
+    return ipartitions


 def update_partition(conn, ipartitions, forihostid, rootdisk):
     dp_idx = IPARTITION_COLUMNS.index("device_path")
     # uuid_idx = IPARTITION_COLUMNS.index("uuid")
-    disk_uuid_idx = IDISK_COLUMNS.index("uuid")
     partition_disk_uuid_idx = IPARTITION_COLUMNS.index("idisk_uuid")
-    rootdisk_uuid = rootdisk[disk_uuid_idx]
+    rootdisk_uuid = rootdisk["uuid"]

     with conn.cursor(cursor_factory=DictCursor) as cur:
         # 1. delete all partitions on rootdisk
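The relocation arithmetic in append_additional_partitions: user-created partitions that followed the release-N defaults keep their size and relative order but shift by the difference between the new and old end of the default block. A toy example with invented numbers:

new_default_end = 112418         # end_mib of last N+1 default partition
old_default_end = 92000          # end_mib of last N default (1st cgts-vg pv)
mib_offset = new_default_end - old_default_end   # 20418

user_part = {"start_mib": 92000, "end_mib": 112480}   # a 20480 MiB partition
user_part["start_mib"] += mib_offset   # 112418: right after the new defaults
user_part["end_mib"] += mib_offset     # 132898: the size is preserved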
@@ -448,76 +408,7 @@ def update_partition(conn, ipartitions, forihostid, rootdisk):
     return partitions


-def get_pvs(forihostid, lvgs, disk_or_part):
-    # IPV_COLUMNS = 'created_at', 'updated_at', 'deleted_at', 'uuid',
-    #               'pv_state', 'pv_type', 'disk_or_part_uuid',
-    #               'disk_or_part_device_node', 'lvm_pv_name',
-    #               'lvm_vg_name', 'lvm_pv_uuid', 'lvm_pv_size',
-    #               'lvm_pe_total', 'lvm_pe_alloced', 'capabilities',
-    #               'forihostid', 'forilvgid', 'disk_or_part_device_path'
-    #
-    # ipv_get() data
-    #               'lvm_pv_name', 'lvm_vg_name', 'lvm_pv_uuid',
-    #               'lvm_pv_size', 'lvm_pe_total', 'lvm_pe_alloced',
-    # adding 'created_at', 'updated_at', 'deleted_at', 'uuid',
-    #        'forihostid', 'forilvgid', 'capabilities', 'pv_state',
-    #        'disk_or_part_device_node', 'disk_or_part_device_path',
-    #        'pv_type', 'disk_or_part_uuid'
-
-    pvo = Pv.PVOperator()
-    pvs = pvo.ipv_get()
-    now = datetime.now()
-    ipvs = []
-
-    pv_additions = {
-        "created_at": now,
-        "updated_at": now,
-        "deleted_at": None,
-        "forihostid": forihostid,
-        # TODO: check to load capabilities
-        "capabilities": None,
-        # TODO: check to load pv_state
-        "pv_state": "provisioned"
-    }
-
-    for pv in pvs:
-        # regenerate uuid
-        pv_additions["uuid"] = "%s" % uuid.uuid4()
-        pv.update(pv_additions)
-
-        # find forilvgid from matching lvg
-        for lvg in lvgs:
-            if lvg["lvm_vg_name"] == pv["lvm_vg_name"]:
-                pv["forilvgid"] = lvg["id"]
-                break
-        else:
-            raise Exception("no lvg matches pv %s %s" %
-                            (pv["lvm_vg_name"], pv["lvm_pv_name"]))
-
-        for dop in disk_or_part:
-            if dop["foripvid"] == pv["id"]:
-                pv["disk_or_part_device_node"] = dop["device_node"]
-                pv["disk_or_part_device_path"] = dop["device_path"]
-                pv["pv_type"] = dop["type"]
-                pv["disk_or_part_uuid"] = dop["uuid"]
-                break
-        else:
-            raise Exception("no disk or partition matches pv %s %s" %
-                            (pv["lvm_vg_name"], pv["lvm_pv_name"]))
-
-        ipv = []
-        # the IPV_COLUMNS is the order of insert and update statement,
-        # sort the values in the same order below to form
-        # SQL statements
-        ipv = [pv[col] for col in IPV_COLUMNS]
-        ipvs.append(ipv)
-
-    return ipvs


 def update_pvs(conn, forihostid):
     LOG.info("update PVs")

     with conn.cursor(cursor_factory=DictCursor) as cur:
         # partition records are pointing to i_pv, but the i_pv reference
         # to partition uuid (disk_or_part_uuid) and device_node
@@ -551,7 +442,7 @@ def update_pvs(conn, forihostid):
               "    i_pv.id = v.id AND p.status = %s"
         cur.execute(sql, (constants.PV_ADD, forihostid, forihostid,
                           constants.PARTITION_CREATE_ON_UNLOCK_STATUS))
-        LOG.info("Update %s PVs" % cur.rowcount)
+        LOG.info("Update %s PVs on partitions" % cur.rowcount)

         sql = "SELECT id, uuid, lvm_pv_name, pv_type, pv_state, " \
               "disk_or_part_uuid " \
@@ -563,98 +454,15 @@ def update_pvs(conn, forihostid):
     return pvs


-def get_lvgs(forihostid):
-    # LVG_COLUMNS = 'created_at', 'updated_at', 'deleted_at', 'uuid',
-    #               'vg_state', 'lvm_vg_name', 'lvm_vg_uuid', 'lvm_vg_access',
-    #               'lvm_max_lv', 'lvm_cur_lv', 'lvm_max_pv', 'lvm_cur_pv',
-    #               'lvm_vg_size', 'lvm_vg_total_pe', 'lvm_vg_free_pe',
-    #               'capabilities', 'forihostid'
-    #
-    # ilvg_get(): 'lvm_vg_name', 'lvm_vg_uuid', 'lvm_vg_access',
-    #             'lvm_max_lv', 'lvm_cur_lv', 'lvm_max_pv',
-    #             'lvm_cur_pv', 'lvm_vg_size', 'lvm_vg_total_pe',
-    #             'lvm_vg_free_pe',
-    # adding 'created_at', 'updated_at', 'deleted_at', 'uuid',
-    #        'forihostid', vg_state, capabilities for db
-    lvgo = Lvg.LVGOperator()
-    lvgs = lvgo.ilvg_get()
-    now = datetime.now()
-    ilvgs = []
-
-    lvg_additions = {
-        "created_at": now,
-        "updated_at": now,
-        "deleted_at": None,
-        "forihostid": forihostid,
-        # TODO: check to load capabilities
-        "capabilities": None,
-        # vg_state can only be provisioned during upgrade
-        "vg_state": "provisioned"
-    }
-
-    for lvg in lvgs:
-        # regenerate uuid
-        lvg_additions["uuid"] = "%s" % uuid.uuid4()
-        lvg.update(lvg_additions)
-
-        ilvg = []
-        # the LVG_COLUMNS is the order of insert statement,
-        # sort the values in the same order below to form
-        # SQL statements
-        ilvg = [lvg[col] for col in LVG_COLUMNS]
-        ilvgs.append(ilvg)
-
-    return ilvgs
-
-
-def update_lvgs(conn, forihostid, ilvgs):
-    new_lvgs = []
-    uuid_idx = LVG_COLUMNS.index("uuid")
-    lvgname_idx = LVG_COLUMNS.index("lvm_vg_name")
+def update_lvgs(conn, forihostid):
     with conn.cursor(cursor_factory=DictCursor) as cur:
-        for ilvg in ilvgs:
-            lvg_name = ilvg[lvgname_idx]
-            setters = ", ".join(["%s=%%s"] * len(LVG_COLUMNS))
-            sql_tmp = "UPDATE i_lvg SET %s where forihostid=%s and " \
-                      "lvm_vg_name=%s" % (setters, "%%s", "%%s")
-            sql = sql_tmp % LVG_COLUMNS
-
-            params = tuple(ilvg) + (forihostid, lvg_name)
-            cur.execute(sql, params)
-            if cur.rowcount == 0:
-                LOG.debug("new lvg %s" % lvg_name)
-                new_lvgs.append(ilvg)
-            else:
-                LOG.debug("update lvg %s" % lvg_name)
-
-        for ilvg in new_lvgs:
-            lvg_name = ilvg[lvgname_idx]
-            col_fmt = ", ".join(["%s"] * len(LVG_COLUMNS))
-            values_fmt = ", ".join(["%%s"] * len(LVG_COLUMNS))
-            sql_fmt = "INSERT INTO i_lvg (%s) VALUES(%s)" % \
-                      (col_fmt, values_fmt)
-            sql = sql_fmt % LVG_COLUMNS
-            cur.execute(sql, ilvg)
-            if cur.rowcount == 1:
-                LOG.info("Add new lvg %s, %s" % (lvg_name, ilvg[uuid_idx]))
-
-        lvg_names = [l[lvgname_idx] for l in ilvgs]
-        in_list = ', '.join(['%s'] * len(lvg_names))
-        # for the LVGs that are not created, (not retrieved from system),
-        # mark them to be recreated during host unlock
-        sql = "UPDATE i_lvg SET vg_state = '%s' " \
-              "FROM (SELECT vg.id FROM i_lvg vg JOIN i_pv pv ON " \
-              "    pv.forilvgid = vg.id " \
-              "    WHERE vg.forihostid = %s AND " \
-              "    vg.lvm_vg_name not IN (%s)) AS filter " \
-              "WHERE i_lvg.id = filter.id;" % \
-              (constants.LVG_ADD, forihostid, in_list)
-
-        lvg_names = tuple(lvg_names)
-        cur.execute(sql, lvg_names)
+        # mark lvgs to be recreated during host unlock
+        sql = "UPDATE i_lvg SET vg_state = %s " \
+              "WHERE lvm_vg_name <> 'cgts-vg' AND forihostid = %s;"
+        cur.execute(sql, (constants.LVG_ADD, forihostid))
         count = cur.rowcount
         if count > 0:
-            LOG.info("%s lvg no longer exist" % count)
+            LOG.info("%s lvg will be recreated" % count)

         sql = "SELECT id, uuid, lvm_vg_name FROM i_lvg WHERE forihostid = %s"
         cur.execute(sql, (forihostid, ))
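update_pvs (context above) repairs i_pv rows by joining them against the recreated partitions with PostgreSQL's UPDATE ... FROM form, then flags them so they are rebuilt on unlock. The shape of such a statement, reduced to a single column and a simplified filter (illustrative, not the script's exact SQL):

sql = ("UPDATE i_pv SET disk_or_part_uuid = v.uuid "
       "FROM (SELECT p.uuid, p.foripvid FROM partition p "
       "      WHERE p.forihostid = %s) AS v "
       "WHERE i_pv.id = v.foripvid AND i_pv.forihostid = %s")
cur.execute(sql, (forihostid, forihostid))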
@@ -676,14 +484,56 @@ def get_disk_or_partition(conn, hostid):
     return dops


-def get_rootdisk(idisks, boot_device):
-    dn_idx = IDISK_COLUMNS.index("device_node")
-    dp_idx = IDISK_COLUMNS.index("device_path")
-    # The boot_device from i_host can be either device_node or device_path
-    for idisk in idisks:
-        if boot_device in (idisk[dp_idx], idisk[dn_idx]):
-            return idisk
-    raise Exception("Cannot find root disk %s" % boot_device)
+def get_rootdisk(conn, hostid, boot_device):
+    # return device_node and device_path of rootdisk
+    sql = "SELECT id, uuid, device_node, device_path " \
+          "FROM i_idisk " \
+          "WHERE (device_node = %s OR device_path = %s) AND forihostid = %s"
+    with conn.cursor(cursor_factory=DictCursor) as cur:
+        cur.execute(sql, (boot_device, boot_device, hostid))
+        rootdisk = cur.fetchone()
+    return rootdisk
+
+
+def get_hosts(conn):
+    with conn.cursor(cursor_factory=DictCursor) as cur:
+        cur.execute("SELECT id, hostname, personality, boot_device "
+                    "FROM i_host WHERE personality "
+                    "IN ('controller', 'worker', 'storage');")
+        nodes = cur.fetchall()
+    return nodes
+
+
+def update_host(conn, host, partition_template):
+    hostid = host["id"]
+    hostname = host["hostname"]
+
+    rootdisk = get_rootdisk(conn, hostid, host["boot_device"])
+    ipartitions = get_ipartitions(hostid,
+                                  partition_template, rootdisk)
+
+    ipartitions = append_additional_partitions(conn, ipartitions,
+                                               host, rootdisk)
+    ipartitions = update_partition(conn, ipartitions, hostid, rootdisk)
+
+    lvgs = update_lvgs(conn, hostid)
+
+    pvs = update_pvs(conn, hostid)
+
+    LOG.info("partition migration summary %s:" % hostname)
+    LOG.info("=" * 60)
+    LOG.info("new list of lvgs:")
+    for lvg in lvgs:
+        LOG.info("%s" % lvg)
+
+    LOG.info("new list of pvs:")
+    for pv in pvs:
+        LOG.info("%s" % pv)
+
+    LOG.info("new list of partitions:")
+    for ip in ipartitions:
+        LOG.info(ip)
+    LOG.info("=" * 60)


 def do_update():
@@ -691,40 +541,24 @@ def do_update():
     conn = psycopg2.connect("dbname=sysinv user=postgres")

     try:
-        ihost = get_ihost(conn)
-        hostid = ihost["id"]
-        boot_device = ihost["boot_device"]
+        cur_host = get_cur_host(conn)
+        rootdisk = get_rootdisk(conn, cur_host["id"], cur_host["boot_device"])
+        controller_partitions = get_controller_partition_template(rootdisk)
+        worker_partitions = get_node_partition_template(WORKER_PARTITION_LIST)
+        stor_partitions = get_node_partition_template(STORAGE_PARTITION_LIST)

-        LOG.info("Upgrade hostid %s, boot_device %s" % (hostid, boot_device))
-        disk_uuid_mapping = get_disk_uuid_mapping(conn, hostid)
-        idisks = get_idisks(hostid, disk_uuid_mapping)
-        rootdisk = get_rootdisk(idisks, boot_device)
-        disks = update_disks(conn, idisks)
+        # migrate hosts with the partition template
+        hosts = get_hosts(conn)
+        for host in hosts:
+            personality = host["personality"]
+            if personality == "worker":
+                partition_template = worker_partitions
+            elif personality == "controller":
+                partition_template = controller_partitions
+            elif personality == "storage":
+                partition_template = stor_partitions

-        ipartitions = get_ipartitions(hostid, disks)
-        ipartitions = append_additional_partitions(conn, ipartitions,
-                                                   hostid, rootdisk)
-        ipartitions = update_partition(conn, ipartitions, hostid, rootdisk)
-
-        ilvgs = get_lvgs(hostid)
-        lvgs = update_lvgs(conn, hostid, ilvgs)
-
-        pvs = update_pvs(conn, hostid)
-
-        LOG.info("partition migration summary:")
-        LOG.info("=========================================================")
-        LOG.info("new list of lvgs:")
-        for lvg in lvgs:
-            LOG.info("%s" % lvg)
-
-        LOG.info("new list of pvs:")
-        for pv in pvs:
-            LOG.info("%s" % pv)
-
-        LOG.info("new list of partitions:")
-        for ip in ipartitions:
-            LOG.info(ip)
-        LOG.info("=========================================================")
+            update_host(conn, host, partition_template)

     except psycopg2.Error as ex:
         conn.rollback()
@@ -744,5 +578,6 @@ def do_update():

     return res

+
 if __name__ == "__main__":
     sys.exit(main())
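The personality dispatch inside do_update could also be a table lookup; a compact equivalent using the same names, which additionally fails loudly on an unexpected personality (the if/elif chain above would leave partition_template unbound in that case):

templates = {
    "worker": worker_partitions,
    "controller": controller_partitions,
    "storage": stor_partitions,
}
for host in get_hosts(conn):
    try:
        partition_template = templates[host["personality"]]
    except KeyError:
        raise Exception("unexpected personality %s" % host["personality"])
    update_host(conn, host, partition_template)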
@@ -2199,7 +2199,7 @@ class HostController(rest.RestController):
         new_ihost_mtc = ihost_obj.as_dict()
         new_ihost_mtc = cutils.removekeys_nonmtce(new_ihost_mtc)

-        if hostupdate.ihost_orig['invprovision'] == constants.PROVISIONED:
+        if hostupdate.ihost_orig['invprovision'] in [constants.UPGRADING, constants.PROVISIONED]:
             new_ihost_mtc.update({'operation': 'modify'})
         else:
             new_ihost_mtc.update({'operation': 'add'})
@@ -2284,7 +2284,7 @@ class HostController(rest.RestController):
         if hostupdate.configure_required:
             # rollback to unconfigure host as mtce has failed the request
             invprovision_state = hostupdate.ihost_orig.get('invprovision') or ""
-            if invprovision_state != constants.PROVISIONED:
+            if invprovision_state not in [constants.UPGRADING, constants.PROVISIONED]:
                 LOG.warn("unconfigure ihost %s provision=%s" %
                         (ihost_obj.uuid, invprovision_state))
                 pecan.request.rpcapi.unconfigure_ihost(
@@ -2511,7 +2511,7 @@ class HostController(rest.RestController):

         if (ihost.hostname and ihost.personality and
                 ihost.invprovision and
-                ihost.invprovision == constants.PROVISIONED and
+                ihost.invprovision in [constants.UPGRADING, constants.PROVISIONED] and
                 (constants.WORKER in ihost.subfunctions)):
             # wait for VIM signal
             return
@@ -2538,7 +2538,7 @@ class HostController(rest.RestController):
             ceph_mons[0].uuid, {'device_path': None}
         )

-        remove_from_cluster = True if ihost.invprovision == constants.PROVISIONED else False
+        remove_from_cluster = True if ihost.invprovision in [constants.UPGRADING, constants.PROVISIONED] else False

         # Delete the stor entries associated with this host
         istors = pecan.request.dbapi.istor_get_by_ihost(ihost['uuid'])
@@ -2613,7 +2613,7 @@ class HostController(rest.RestController):
                 personality.find(constants.STORAGE_HOSTNAME) != -1 and
                 ihost.hostname not in [constants.STORAGE_0_HOSTNAME,
                                        constants.STORAGE_1_HOSTNAME] and
-                ihost.invprovision in [constants.PROVISIONED,
+                ihost.invprovision in [constants.UPGRADING, constants.PROVISIONED,
                                        constants.PROVISIONING]):
             self._ceph.host_crush_remove(ihost.hostname)

@@ -2771,6 +2771,8 @@ class HostController(rest.RestController):
             # perform rpc to conductor to do the update with root privilege access
             pecan.request.rpcapi.update_controller_upgrade_flag(pecan.request.context)

+            pecan.request.dbapi.ihost_update(uuid,
+                                             {'invprovision': constants.UPGRADING})
             return Host.convert_with_links(rpc_ihost)

     @cutils.synchronized(LOCK_NAME)
@@ -2984,7 +2986,7 @@ class HostController(rest.RestController):
         if rpc_ihost.administrative != constants.ADMIN_LOCKED:
             raise wsme.exc.ClientSideError(
                 _("The host must be locked before performing this operation"))
-        elif rpc_ihost.invprovision != "provisioned":
+        elif rpc_ihost.invprovision not in [constants.UPGRADING, constants.PROVISIONED]:
             raise wsme.exc.ClientSideError(_("The host must be provisioned "
                                              "before performing this operation"))
         elif not force and rpc_ihost.availability != "online":
@@ -4928,8 +4930,9 @@ class HostController(rest.RestController):
         if 'operational' in hostupdate.delta and \
                 hostupdate.ihost_patch['operational'] == \
                 constants.OPERATIONAL_ENABLED:
-            if hostupdate.ihost_orig['invprovision'] == constants.PROVISIONING or \
-                    hostupdate.ihost_orig['invprovision'] == constants.UNPROVISIONED:
+            if hostupdate.ihost_orig['invprovision'] in [constants.UPGRADING,
+                                                         constants.PROVISIONING,
+                                                         constants.UNPROVISIONED]:
                 # first time unlocked successfully
                 local_hostname = cutils.get_local_controller_hostname()
                 if (hostupdate.ihost_patch['hostname'] ==
@@ -4953,7 +4956,7 @@ class HostController(rest.RestController):
         )
         host_names = []
         for ihost in ihosts:
-            if ihost.invprovision == constants.PROVISIONED:
+            if ihost.invprovision in [constants.UPGRADING, constants.PROVISIONED]:
                 host_names.append(ihost.hostname)
         LOG.info("Provisioned storage node(s) %s" % host_names)

@@ -5170,7 +5173,7 @@ class HostController(rest.RestController):
         ihost = hostupdate.ihost_patch
         delta = hostupdate.delta

-        provision_state = [constants.PROVISIONED, constants.PROVISIONING]
+        provision_state = [constants.UPGRADING, constants.PROVISIONED, constants.PROVISIONING]
         if hostupdate.ihost_orig['invprovision'] in provision_state:
             state_rel_path = ['hostname', 'personality', 'subfunctions']
             if any(p in state_rel_path for p in delta):
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017-2021 Wind River Systems, Inc.
+# Copyright (c) 2017-2022 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -462,7 +462,7 @@ def _are_partition_operations_simultaneous(ihost, partition, operation):
                 forihostid=partition['forihostid'])

     if (ihost.invprovision in
-            [constants.PROVISIONED, constants.PROVISIONING]):
+            [constants.UPGRADING, constants.PROVISIONED, constants.PROVISIONING]):
         if not (all(host_partition.get('status') in
                 [constants.PARTITION_READY_STATUS,
                  constants.PARTITION_IN_USE_STATUS,
@@ -532,7 +532,7 @@ def _semantic_checks(operation, partition):
         #############
         # Only allow in-service modify of partitions. If the host isn't
         # provisioned just limit operations to create/delete.
-        if ihost.invprovision != constants.PROVISIONED:
+        if ihost.invprovision not in [constants.PROVISIONED, constants.UPGRADING]:
             raise wsme.exc.ClientSideError(
                 _("Only partition Add/Delete operations are allowed on an "
                   "unprovisioned host."))
@@ -655,7 +655,8 @@ def _create(partition):
     _build_device_node_path(partition)

     # Set the status of the new partition
-    if (ihost.invprovision in [constants.PROVISIONED,
+    if (ihost.invprovision in [constants.UPGRADING,
+                               constants.PROVISIONED,
                                constants.PROVISIONING]):
         partition['status'] = constants.PARTITION_CREATE_IN_SVC_STATUS
     else:
@@ -679,7 +680,8 @@ def _create(partition):
     # - PROVISIONED: standard controller/worker (after config_controller)
     # - PROVISIONING: AIO (after config_controller) and before worker
     #   configuration
-    if (ihost.invprovision in [constants.PROVISIONED,
+    if (ihost.invprovision in [constants.UPGRADING,
+                               constants.PROVISIONED,
                                constants.PROVISIONING]):
         # Instruct puppet to implement the change
         pecan.request.rpcapi.update_partition_config(pecan.request.context,
@@ -388,7 +388,7 @@ def _check_field(field):
 def _check_device_sriov(device, host):
     sriov_update = False
     if (device['pdevice_id'] in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
-            host.invprovision != constants.PROVISIONED):
+            host.invprovision not in [constants.UPGRADING, constants.PROVISIONED]):
         raise wsme.exc.ClientSideError(_("Cannot configure device %s "
                                          "until host %s is unlocked for the first time." %
                                          (device['uuid'], host.hostname)))
@@ -16,7 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2013-2021 Wind River Systems, Inc.
+# Copyright (c) 2013-2022 Wind River Systems, Inc.
 #

 import jsonpatch
@@ -697,10 +697,12 @@ def _check_device(new_pv, ihost):
     # Perform a quick validation check on this partition as it may be added
     # immediately.
     if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
-            ((ihost['invprovision'] in [constants.PROVISIONED,
+            ((ihost['invprovision'] in [constants.UPGRADING,
+                                        constants.PROVISIONED,
                                         constants.PROVISIONING]) and
             (new_pv_device.status != constants.PARTITION_READY_STATUS)) or
-            ((ihost['invprovision'] not in [constants.PROVISIONED,
+            ((ihost['invprovision'] not in [constants.UPGRADING,
+                                            constants.PROVISIONED,
                                             constants.PROVISIONING]) and
             (new_pv_device.status not in [
                 constants.PARTITION_CREATE_ON_UNLOCK_STATUS,
@@ -16,7 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2013-2020 Wind River Systems, Inc.
+# Copyright (c) 2013-2022 Wind River Systems, Inc.
 #

 from eventlet.green import subprocess
@@ -784,7 +784,7 @@ class SBApiHelper(object):
                 )

         for chost in chosts:
-            if chost.invprovision != constants.PROVISIONED:
+            if chost.invprovision not in [constants.PROVISIONED, constants.UPGRADING]:
                 raise wsme.exc.ClientSideError(
                     "This operation requires %s controllers provisioned." %
                     min_number
@@ -1,7 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

 #
-# Copyright (c) 2016, 2019 Wind River Systems, Inc.
+# Copyright (c) 2016-2022 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -743,8 +743,8 @@ class CephApiOperator(object):
             return
         storage_num = int(hostupdate.ihost_orig['hostname'][8:])
         if (storage_num >= 2 and
-                hostupdate.ihost_orig['invprovision'] !=
-                constants.PROVISIONED):
+                hostupdate.ihost_orig['invprovision'] not in
+                [constants.UPGRADING, constants.PROVISIONED]):

             # update crushmap accordingly with the host and it's peer group
             node_bucket = hostupdate.ihost_orig['hostname']
@@ -162,6 +162,7 @@ FORCE_LOCKING = "Force Locking"
 OPERATIONAL_ENABLED = 'enabled'
 OPERATIONAL_DISABLED = 'disabled'

+UPGRADING = 'upgrading'
 PROVISIONED = 'provisioned'
 PROVISIONING = 'provisioning'
 UNPROVISIONED = 'unprovisioned'
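Nearly every call site in this commit repeats the same membership test against the new state. A hypothetical helper (not in this commit) that captures the intended semantics:

def is_provisioned_like(invprovision):
    # an 'upgrading' host was already provisioned on the from-release,
    # so semantic checks treat it like 'provisioned'
    return invprovision in [UPGRADING, PROVISIONED]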
@@ -1659,14 +1659,15 @@ def partitions_are_in_order(disk_partitions, requested_partitions):

 # TODO(oponcea): Remove once sm supports in-service configuration reload.
 def is_single_controller(dbapi):
-    # Check the number of provisioned/provisioning hosts. If there is
-    # only one then we have a single controller (AIO-SX, single AIO-DX, or
+    # Check the number of provisioned/upgrading/provisioning hosts. If there
+    # is only one then we have a single controller (AIO-SX, single AIO-DX, or
     # single std controller). If this is the case reset sm after adding
     # cinder so that cinder DRBD/processes are managed.
     hosts = dbapi.ihost_get_list()
     prov_hosts = [h for h in hosts
                   if h.invprovision in [constants.PROVISIONED,
-                                        constants.PROVISIONING]]
+                                        constants.PROVISIONING,
+                                        constants.UPGRADING]]
     if len(prov_hosts) == 1:
         return True
     return False
@@ -2766,7 +2766,7 @@ class ConductorManager(service.PeriodicService):
         except exception.AddressNotFoundByName:
             pass

-        if ihost.invprovision not in [constants.PROVISIONED, constants.PROVISIONING]:
+        if ihost.invprovision not in [constants.PROVISIONED, constants.PROVISIONING, constants.UPGRADING]:
             LOG.info("Updating %s host invprovision from %s to %s" %
                      (ihost.hostname, ihost.invprovision, constants.UNPROVISIONED))
             value = {'invprovision': constants.UNPROVISIONED}
@@ -3721,7 +3721,7 @@ class ConductorManager(service.PeriodicService):
             return

         if ihost['administrative'] == constants.ADMIN_LOCKED and \
-                ihost['invprovision'] == constants.PROVISIONED and \
+                ihost['invprovision'] in [constants.PROVISIONED, constants.UPGRADING] and \
                 not force_update:
             LOG.debug("Ignore the host memory audit after the host is locked")
             return
@@ -4131,6 +4131,17 @@ class ConductorManager(service.PeriodicService):
             LOG.exception("Invalid ihost_uuid %s" % ihost_uuid)
             return

+        try:
+            self.dbapi.software_upgrade_get_one()
+        except exception.NotFound:
+            # No upgrade in progress
+            pass
+        else:
+            if ihost.software_load != tsc.SW_VERSION or ihost.invprovision == constants.UPGRADING:
+                LOG.info("Ignore updating lvg for host: %s. Version "
+                         "%s mismatch." % (ihost.hostname, ihost.software_load))
+                return
+
         forihostid = ihost['id']

         ilvgs = self.dbapi.ilvg_get_by_ihost(ihost_uuid)
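The new guard uses try/except/else: the else branch runs only when software_upgrade_get_one() did not raise, i.e. only while an upgrade is in progress. A generic illustration of that control flow:

def lookup():
    raise LookupError  # stands in for exception.NotFound

try:
    lookup()
except LookupError:
    pass            # nothing found: fall through and do the work
else:
    print("skip")   # reached only when lookup() succeeded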
@@ -4466,7 +4477,7 @@ class ConductorManager(service.PeriodicService):
         try:
             ihost = self.dbapi.ihost_get(ipv.get('forihostid'))
             values = {'foripvid': None}
-            if ihost['invprovision'] == constants.PROVISIONED:
+            if ihost['invprovision'] in [constants.PROVISIONED, constants.UPGRADING]:
                 values.update(
                     {'status': constants.PARTITION_READY_STATUS})
             self.dbapi.partition_update(ipv['disk_or_part_uuid'], values)
@@ -4549,7 +4560,7 @@ class ConductorManager(service.PeriodicService):
             # No upgrade in progress
             pass
         else:
-            if db_host.software_load != tsc.SW_VERSION:
+            if db_host.software_load != tsc.SW_VERSION or db_host.invprovision == constants.UPGRADING:
                 LOG.info("Ignore updating disk partition for host: %s. Version "
                          "%s mismatch." % (db_host.hostname, db_host.software_load))
                 return
@@ -4781,7 +4792,7 @@ class ConductorManager(service.PeriodicService):
             # No upgrade in progress
             pass
         else:
-            if ihost.software_load != tsc.SW_VERSION:
+            if ihost.software_load != tsc.SW_VERSION or ihost.invprovision == constants.UPGRADING:
                 LOG.info("Ignore updating physical volume for host: %s. Version "
                          "%s mismatch." % (ihost.hostname, ihost.software_load))
                 return
@@ -5225,23 +5236,6 @@ class ConductorManager(service.PeriodicService):
                 LOG.info("remove out-of-date rook provisioned pv %s" % ipv.lvm_pv_name)
                 self._prepare_for_ipv_removal(ipv)
                 self.dbapi.ipv_destroy(ipv.id)
-
-            # If upgrading from CentOS to Debian the partition scheme
-            # may differ, so we can remove the PV in this case
-            # TODO (heitormatsui): remove when CentOS to Debian upgrade is deprecated
-            try:
-                upgrade_in_progress = self.dbapi.software_upgrade_get_one()
-                loads = self.dbapi.load_get_list()
-                target_load = cutils.get_imported_load(loads)
-                host_upgrade = self.dbapi.host_upgrade_get_by_host(forihostid)
-                if (host_upgrade.software_load == upgrade_in_progress.to_load and
-                        target_load.software_version == tsc.SW_VERSION_22_12):
-                    # remove duplicated pv data from CentOS
-                    LOG.info("remove out-of-date CentOS provisioned pv %s" % ipv.lvm_pv_name)
-                    self._prepare_for_ipv_removal(ipv)
-                    self.dbapi.ipv_destroy(ipv.id)
-            except exception.NotFound:
-                pass
         else:
             if (ipv.pv_state == constants.PV_ERR and
                     ipv.lvm_vg_name == ipv_in_agent['lvm_vg_name']):
@@ -5398,7 +5392,7 @@ class ConductorManager(service.PeriodicService):
         for host_id, update_set in update_hosts.items():

             ihost = self.dbapi.ihost_get(host_id)
-            if (ihost.invprovision != constants.PROVISIONED and
+            if (ihost.invprovision not in [constants.PROVISIONED, constants.UPGRADING] and
                     tsc.system_type != constants.TIS_AIO_BUILD):
                 continue
             if ihost:
@@ -11032,7 +11026,7 @@ class ConductorManager(service.PeriodicService):
             # node before the "worker_config_complete" has been
             # executed.
             elif (force or
-                  host.invprovision == constants.PROVISIONED or
+                  host.invprovision in [constants.PROVISIONED, constants.UPGRADING] or
                   (host.invprovision == constants.PROVISIONING and
                    host.personality == constants.CONTROLLER)):
                 if host.software_load == tsc.SW_VERSION:
@@ -0,0 +1,64 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (c) 2022 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+from sqlalchemy import MetaData, Table, Column, Integer, Enum, String
+from sqlalchemy.dialects import postgresql
+
+
+ENGINE = 'InnoDB'
+CHARSET = 'utf8'
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    Table('i_system',
+          meta,
+          Column('id', Integer,
+                 primary_key=True, nullable=False),
+          mysql_engine=ENGINE, mysql_charset=CHARSET)
+
+    i_host = Table('i_host',
+                   meta,
+                   Column('id', Integer,
+                          primary_key=True, nullable=False),
+                   mysql_engine=ENGINE, mysql_charset=CHARSET,
+                   autoload=True)
+
+    if migrate_engine.url.get_dialect() is postgresql.dialect:
+        old_provisionEnum = Enum('unprovisioned',
+                                 'inventoried',
+                                 'configured',
+                                 'provisioning',
+                                 'provisioned',
+                                 'reserve1',
+                                 'reserve2',
+                                 name='invprovisionStateEnum')
+
+        provisionEnum = Enum('unprovisioned',
+                             'inventoried',
+                             'configured',
+                             'provisioning',
+                             'provisioned',
+                             'upgrading',
+                             'reserve1',
+                             'reserve2',
+                             name='invprovisionStateEnum')
+
+        inv_provision_col = i_host.c.invprovision
+        inv_provision_col.alter(Column('invprovision', String(60)))
+        old_provisionEnum.drop(bind=migrate_engine, checkfirst=False)
+        provisionEnum.create(bind=migrate_engine, checkfirst=False)
+        migrate_engine.execute('ALTER TABLE i_host ALTER COLUMN invprovision TYPE "invprovisionStateEnum" '
+                               'USING invprovision::text::"invprovisionStateEnum"')
+
+
+def downgrade(migrate_engine):
+    # As per other openstack components, downgrade is
+    # unsupported in this release.
+    raise NotImplementedError('SysInv database downgrade is unsupported.')
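After upgrade() runs, the recreated PostgreSQL enum can be checked for the new value; an illustrative verification in the same SQLAlchemy style the migration itself uses:

result = migrate_engine.execute(
    'SELECT unnest(enum_range(NULL::"invprovisionStateEnum"))')
assert 'upgrading' in [row[0] for row in result]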
@@ -126,6 +126,7 @@ class ihost(Base):
                           'configured',
                           'provisioning',
                           'provisioned',
+                          'upgrading',
                           'reserve1',
                           'reserve2',
                           name='invprovisionStateEnum')