diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/idisk_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/idisk_shell.py
index 74bb07fea4..2034404e49 100644
--- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/idisk_shell.py
+++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/idisk_shell.py
@@ -14,6 +14,7 @@ from cgtsclient.common import constants
 from cgtsclient.common import utils
 from cgtsclient import exc
 from cgtsclient.v1 import ihost as ihost_utils
+import math


 def _print_idisk_show(idisk):
@@ -22,7 +23,7 @@ def _print_idisk_show(idisk):
               'ihost_uuid', 'istor_uuid', 'ipv_uuid', 'created_at',
               'updated_at']
     labels = ['device_node', 'device_num', 'device_type', 'device_path',
-              'size_mib', 'available_mib', 'rpm', 'serial_id', 'uuid',
+              'size_gib', 'available_gib', 'rpm', 'serial_id', 'uuid',
               'ihost_uuid', 'istor_uuid', 'ipv_uuid', 'created_at',
               'updated_at']
     data = [(f, getattr(idisk, f, '')) for f in fields]
@@ -50,6 +51,11 @@ def do_host_disk_show(cc, args):
     """Show disk attributes."""
     ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
     idisk = _find_disk(cc, ihost, args.device_nodeoruuid)
+
+    # Convert size from mib to gib and round it down
+    idisk.size_mib = math.floor(float(idisk.size_mib) / 1024 * 1000) / 1000.0
+    idisk.available_mib = math.floor(float(idisk.available_mib) / 1024 * 1000) / 1000.0
+
     _print_idisk_show(idisk)
@@ -64,12 +70,17 @@ def do_host_disk_list(cc, args):
     idisks = cc.idisk.list(ihost.uuid)

     field_labels = ['uuid', 'device_node', 'device_num', 'device_type',
-                    'size_mib', 'available_mib', 'rpm', 'serial_id',
+                    'size_gib', 'available_gib', 'rpm', 'serial_id',
                     'device_path']
     fields = ['uuid', 'device_node', 'device_num', 'device_type',
               'size_mib', 'available_mib', 'rpm', 'serial_id',
               'device_path']
+
+    # Convert size from mib to gib and round it down
+    for i in idisks:
+        i.size_mib = math.floor(float(i.size_mib) / 1024 * 1000) / 1000.0
+        i.available_mib = math.floor(float(i.available_mib) / 1024 * 1000) / 1000.0
+
     utils.print_list(idisks, fields, field_labels, sortby=1)
diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/ilvg_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/ilvg_shell.py
index 3c44f21d00..09c3b8af64 100644
--- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/ilvg_shell.py
+++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/ilvg_shell.py
@@ -18,6 +18,11 @@ from oslo_serialization import jsonutils


 def _print_ilvg_show(ilvg):
+    labels = ['lvm_vg_name', 'vg_state', 'uuid', 'ihost_uuid', 'lvm_vg_access',
+              'lvm_max_lv', 'lvm_cur_lv', 'lvm_max_pv', 'lvm_cur_pv',
+              'lvm_vg_size_gib', 'lvm_vg_total_pe', 'lvm_vg_free_pe', 'created_at',
+              'updated_at', 'parameters']
+
     fields = ['lvm_vg_name', 'vg_state', 'uuid', 'ihost_uuid', 'lvm_vg_access',
               'lvm_max_lv', 'lvm_cur_lv', 'lvm_max_pv', 'lvm_cur_pv',
               'lvm_vg_size', 'lvm_vg_total_pe', 'lvm_vg_free_pe', 'created_at',
@@ -32,7 +37,7 @@ def _print_ilvg_show(ilvg):

     # rename capabilities for display purposes and add to display list
     data.append(('parameters', getattr(ilvg, 'capabilities', '')))
-    utils.print_tuple_list(data)
+    utils.print_tuple_list(data, labels)


 def _find_lvg(cc, ihost, lvguuid):
@@ -171,11 +176,11 @@ def do_host_lvg_delete(cc, args):
           help=("Set the number of concurrent I/O intensive disk operations "
                 "such as glance image downloads, image format conversions, "
                 "etc. [nova-local]"))
-@utils.arg('-s', '--instances_lv_size_mib',
-           metavar='',
-           help=("Set the desired size (in MiB) of the instances LV that is "
+@utils.arg('-s', '--instances_lv_size_gib',
+           metavar='',
+           help=("Set the desired size (in GiB) of the instances LV that is "
                  "used for /etc/nova/instances. Example: For a 50GB volume, "
-                 "use 51200. Required when instance backing is \"lvm\". "
+                 "use 50. Required when instance backing is \"lvm\". "
                  "[nova-local]"))
 @utils.arg('-l', '--lvm_type',
            metavar='',
@@ -187,22 +192,29 @@ def do_host_lvg_modify(cc, args):

     # Get all the fields from the command arguments
     field_list = ['hostnameorid', 'lvgnameoruuid',
-                  'instance_backing', 'instances_lv_size_mib',
+                  'instance_backing', 'instances_lv_size_gib',
                   'concurrent_disk_operations', 'lvm_type']
     fields = dict((k, v) for (k, v) in vars(args).items()
                   if k in field_list and not (v is None))

-    all_caps_list = ['instance_backing', 'instances_lv_size_mib',
+    all_caps_list = ['instance_backing', 'instances_lv_size_gib',
                      'concurrent_disk_operations', 'lvm_type']
-    integer_fields = ['instances_lv_size_mib', 'concurrent_disk_operations']
+    integer_fields = ['instances_lv_size_gib', 'concurrent_disk_operations']
     requested_caps_dict = {}

     for cap in all_caps_list:
         if cap in fields:
-            if cap in integer_fields:
-                requested_caps_dict[cap] = int(fields[cap])
-            else:
-                requested_caps_dict[cap] = fields[cap]
+            try:
+                if cap in integer_fields:
+                    requested_caps_dict[cap] = int(fields[cap])
+                else:
+                    requested_caps_dict[cap] = fields[cap]
+                if cap == 'instances_lv_size_gib':
+                    requested_caps_dict['instances_lv_size_mib'] = \
+                        requested_caps_dict.pop('instances_lv_size_gib') * 1024
+            except ValueError:
+                raise exc.CommandError('instances_lv size must be an integer '
+                                       'greater than 0: %s' % fields[cap])

     # Get the ihost object
     ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iprofile_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iprofile_shell.py
index d014827572..8e6f2135db 100644
--- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iprofile_shell.py
+++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iprofile_shell.py
@@ -17,6 +17,7 @@ from cgtsclient.v1 import ethernetport as ethernetport_utils
 from cgtsclient.v1 import icpu as icpu_utils
 from cgtsclient.v1 import ihost as ihost_utils
 from cgtsclient.v1 import iprofile as iprofile_utils
+import math

 #
 # INTERFACE PROFILES
@@ -311,7 +312,7 @@ def get_storconfig_detailed(iprofile):
             if stor.function == 'journal' and count > 1:
                 str += " %s" % journals[stor.uuid]
             if stor.function == 'osd':
-                str += ", ceph journal: size %s MiB, " % stor.journal_size_mib
+                str += ", ceph journal: size %s GiB, " % (stor.journal_size_mib / 1024)
                 if stor.journal_location == stor.uuid:
                     str += "collocated on osd stor"
                 else:
@@ -329,7 +330,7 @@ def get_diskconfig(iprofile):
     for disk in iprofile.disks:
         if str != '':
             str = str + "; "
-        str = str + "%s: %s" % (disk.device_path, disk.size_mib)
+        str = str + "%s: %s GiB" % (disk.device_path, math.floor(float(disk.size_mib) / 1024 * 1000) / 1000.0)
         if not disk.device_path:
             invalid_profile = True
     return str, invalid_profile
@@ -340,7 +341,7 @@ def get_partconfig(iprofile):
     for part in iprofile.partitions:
         if str != '':
             str = str + "; "
-        str = str + "%s: %s" % (part.device_path, part.size_mib)
+        str = str + "%s: %s GiB" % (part.device_path, math.floor(float(part.size_mib) / 1024 * 1000) / 1000.0)
     return str


@@ -354,6 +355,9 @@ def get_ilvg_config(iprofile):
         for k, v in ilvg.capabilities.iteritems():
             if capabilities_str != '':
                 capabilities_str += "; "
+            if k == "instances_lv_size_mib":
+                k = "instances_lv_size_gib"
+                v = v / 1024
             capabilities_str += "%s: %s " % (k, v)

         str += "%s, %s" % (ilvg.lvm_vg_name, capabilities_str)
diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/ipv_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/ipv_shell.py
index c9172caf7a..11c3dd76e2 100644
--- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/ipv_shell.py
+++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/ipv_shell.py
@@ -15,16 +15,23 @@ from cgtsclient.v1 import idisk as idisk_utils
 from cgtsclient.v1 import ihost as ihost_utils
 from cgtsclient.v1 import ilvg as ilvg_utils
 from cgtsclient.v1 import partition as partition_utils
+import math


 def _print_ipv_show(ipv):
+    labels = ['uuid', 'pv_state', 'pv_type', 'disk_or_part_uuid',
+              'disk_or_part_device_node', 'disk_or_part_device_path',
+              'lvm_pv_name', 'lvm_vg_name', 'lvm_pv_uuid',
+              'lvm_pv_size_gib', 'lvm_pe_total', 'lvm_pe_alloced', 'ihost_uuid',
+              'created_at', 'updated_at']
     fields = ['uuid', 'pv_state', 'pv_type', 'disk_or_part_uuid',
               'disk_or_part_device_node', 'disk_or_part_device_path',
               'lvm_pv_name', 'lvm_vg_name', 'lvm_pv_uuid',
               'lvm_pv_size', 'lvm_pe_total', 'lvm_pe_alloced', 'ihost_uuid',
               'created_at', 'updated_at']
+    ipv.lvm_pv_size = math.floor(float(ipv.lvm_pv_size) / (1024 ** 3) * 1000) / 1000.0
     data = [(f, getattr(ipv, f, '')) for f in fields]
-    utils.print_tuple_list(data)
+    utils.print_tuple_list(data, labels)


 def _find_pv(cc, ihost, pvuuid):
diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/istor_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/istor_shell.py
index 282e208abd..ef556cdf0b 100644
--- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/istor_shell.py
+++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/istor_shell.py
@@ -20,8 +20,12 @@ def _print_istor_show(istor):
               'journal_size_mib', 'journal_path', 'journal_node',
               'uuid', 'ihost_uuid', 'idisk_uuid', 'tier_uuid', 'tier_name',
               'created_at', 'updated_at']
+    labels = ['osdid', 'function', 'journal_location',
+              'journal_size_gib', 'journal_path', 'journal_node',
+              'uuid', 'ihost_uuid', 'idisk_uuid', 'tier_uuid', 'tier_name',
+              'created_at', 'updated_at']
     data = [(f, getattr(istor, f, '')) for f in fields]
-    utils.print_tuple_list(data)
+    utils.print_tuple_list(data, labels)


 def _find_stor(cc, ihost, storuuid):
@@ -50,6 +54,10 @@ def do_host_stor_show(cc, args):
     i = cc.istor.get(args.storuuid)
+
+    # convert journal size from mib to gib when display
+    if i.journal_size_mib:
+        i.journal_size_mib = i.journal_size_mib / 1024
+
     _print_istor_show(i)
@@ -64,9 +72,13 @@ def do_host_stor_list(cc, args):
     for i in istors:
         istor_utils._get_disks(cc, ihost, i)
+        # convert journal size from mib to gib when display
+        if i.journal_size_mib:
+            i.journal_size_mib = i.journal_size_mib / 1024
+
     field_labels = ['uuid', 'function', 'osdid', 'capabilities',
                     'idisk_uuid', 'journal_path', 'journal_node',
-                    'journal_size_mib', 'tier_name']
+                    'journal_size_gib', 'tier_name']
     fields = ['uuid', 'function', 'osdid', 'capabilities',
               'idisk_uuid', 'journal_path', 'journal_node',
               'journal_size_mib', 'tier_name']
@@ -91,10 +103,10 @@ def do_host_stor_list(cc, args):
            default=None,
            help="Location of stor's journal")
 @utils.arg('--journal-size',
-           metavar='',
+           metavar='',
            nargs='?',
            default=None,
-           help="Size of stor's journal, in MiB")
+           help="Size of stor's journal, in GiB")
 @utils.arg('--tier-uuid',
            metavar='',
            nargs='?',
@@ -105,18 +117,22 @@ def do_host_stor_add(cc, args):
     field_list = ['function', 'idisk_uuid', 'journal_location', 'journal_size',
                   'tier_uuid']
-
-    # default values, name comes from 'osd add'
-    fields = {'function': 'osd'}
-
-    ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
+    integer_fields = ['journal_size']

     user_specified_fields = dict((k, v) for (k, v) in vars(args).items()
                                  if k in field_list and not (v is None))

+    for f in user_specified_fields:
+        try:
+            if f in integer_fields:
+                user_specified_fields[f] = int(user_specified_fields[f])
+        except ValueError:
+            raise exc.CommandError('Journal size must be an integer '
+                                   'greater than 0: %s' % user_specified_fields[f])
+
     if 'journal_size' in user_specified_fields.keys():
         user_specified_fields['journal_size_mib'] = \
-            user_specified_fields.pop('journal_size')
+            user_specified_fields.pop('journal_size') * 1024

     if 'function' in user_specified_fields.keys():
         user_specified_fields['function'] = \
@@ -126,8 +142,13 @@ def do_host_stor_add(cc, args):
         user_specified_fields['tier_uuid'] = \
             user_specified_fields['tier_uuid'].replace(" ", "")

+    # default values, name comes from 'osd add'
+    fields = {'function': 'osd'}
+
     fields.update(user_specified_fields)

+    ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
+
     try:
         fields['ihost_uuid'] = ihost.uuid
         istor = cc.istor.create(**fields)
@@ -162,13 +183,22 @@ def do_host_stor_update(cc, args):
     """Modify journal attributes for OSD."""
     field_list = ['function', 'idisk_uuid', 'journal_location',
                   'journal_size']
+    integer_fields = ['journal_size']

     user_specified_fields = dict((k, v) for (k, v) in vars(args).items()
                                  if k in field_list and not (v is None))

+    for f in user_specified_fields:
+        try:
+            if f in integer_fields:
+                user_specified_fields[f] = int(user_specified_fields[f])
+        except ValueError:
+            raise exc.CommandError('Journal size must be an integer '
+                                   'greater than 0: %s' % user_specified_fields[f])
+
     if 'journal_size' in user_specified_fields.keys():
         user_specified_fields['journal_size_mib'] = \
-            user_specified_fields.pop('journal_size')
+            user_specified_fields.pop('journal_size') * 1024

     patch = []
     for (k, v) in user_specified_fields.items():
diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/partition_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/partition_shell.py
index 85e6817df8..f17f7a2e7f 100644
--- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/partition_shell.py
+++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/partition_shell.py
@@ -16,6 +16,7 @@ from cgtsclient import exc
 from cgtsclient.v1 import idisk as idisk_utils
 from cgtsclient.v1 import ihost as ihost_utils
 from cgtsclient.v1 import partition as part_utils
+import math

 PARTITION_MAP = {'lvm_phys_vol': constants.USER_PARTITION_PHYSICAL_VOLUME}

@@ -76,8 +77,10 @@ def do_host_disk_partition_list(cc, args):
     for p in ipartitions:
         p.status = constants.PARTITION_STATUS_MSG[p.status]
+        p.size_mib = math.floor(float(p.size_mib) / 1024 * 1000) / 1000.0
+
     field_labels = ['uuid', 'device_path', 'device_node', 'type_guid',
-                    'type_name', 'size_mib', 'status']
+                    'type_name', 'size_gib', 'status']
     fields = ['uuid', 'device_path', 'device_node', 'type_guid',
               'type_name', 'size_mib', 'status']
@@ -90,9 +93,9 @@ def do_host_disk_partition_list(cc, args):
 @utils.arg('disk_path_or_uuid',
            metavar='',
            help="UUID of the disk to place the partition [REQUIRED]")
-@utils.arg('size_mib',
-           metavar='',
-           help="Requested size of the new partition in MiB [REQUIRED]")
+@utils.arg('size_gib',
+           metavar='',
+           help="Requested size of the new partition in GiB [REQUIRED]")
 @utils.arg('-t', '--partition_type',
            metavar='',
            choices=['lvm_phys_vol'],
@@ -102,8 +105,22 @@ def do_host_disk_partition_add(cc, args):
     """Add a disk partition to a disk of a specified host."""
-    field_list = ['size_mib', 'partition_type']
-    integer_fields = ['size_mib']
+    field_list = ['size_gib', 'partition_type']
+    integer_fields = ['size_gib']
+
+    user_fields = dict((k, v) for (k, v) in vars(args).items()
+                       if k in field_list and not (v is None))
+
+    for f in user_fields:
+        try:
+            if f in integer_fields:
+                user_fields[f] = int(user_fields[f])
+        except ValueError:
+            raise exc.CommandError('Partition size must be an integer '
+                                   'greater than 0: %s' % user_fields[f])
+
+    # Convert size from gib to mib
+    user_fields['size_mib'] = user_fields.pop('size_gib') * 1024

     # Get the ihost object
     ihost = ihost_utils._find_ihost(cc, args.hostname_or_id)
@@ -117,17 +134,6 @@ def do_host_disk_partition_add(cc, args):
               'idisk_uuid': idisk.uuid,
               'size_mib': 0}

-    user_fields = dict((k, v) for (k, v) in vars(args).items()
-                       if k in field_list and not (v is None))
-
-    for f in user_fields:
-        try:
-            if f in integer_fields:
-                user_fields[f] = int(user_fields[f])
-        except ValueError:
-            raise exc.CommandError('Partition size must be an integer '
-                                   'greater than 0: %s' % user_fields[f])
-
     fields.update(user_fields)

     # Set the requested partition GUID
@@ -184,14 +190,16 @@ def do_host_disk_partition_delete(cc, args):
 @utils.arg('partition_path_or_uuid',
            metavar='',
            help="UUID of the partition [REQUIRED]")
-@utils.arg('-s', '--size_mib',
-           metavar='',
+@utils.arg('-s', '--size_gib',
+           metavar='',
            help=("Update the desired size of the partition"))
 def do_host_disk_partition_modify(cc, args):
     """Modify the attributes of a Disk Partition."""

     # Get all the fields from the command arguments
-    field_list = ['size_mib']
+    field_list = ['size_gib']
+    integer_fields = ['size_gib']
+
     user_specified_fields = dict((k, v) for (k, v) in vars(args).items()
                                  if k in field_list and not (v is None))
@@ -199,6 +207,17 @@ def do_host_disk_partition_modify(cc, args):
         raise exc.CommandError('No update parameters specified, '
                                'partition is unchanged.')

+    for f in user_specified_fields:
+        try:
+            if f in integer_fields:
+                user_specified_fields[f] = int(user_specified_fields[f])
+        except ValueError:
+            raise exc.CommandError('Partition size must be an integer '
+                                   'greater than 0: %s' % user_specified_fields[f])
+
+    # Convert size from gib to mib
+    user_specified_fields['size_mib'] = user_specified_fields.pop('size_gib') * 1024
+
     # Get the ihost object
     ihost = ihost_utils._find_ihost(cc, args.hostname_or_id)
diff --git a/sysinv/sysinv/sysinv/etc/sysinv/profileSchema.xsd b/sysinv/sysinv/sysinv/etc/sysinv/profileSchema.xsd
index 0ceec8b4bb..5c86800421 100644
--- a/sysinv/sysinv/sysinv/etc/sysinv/profileSchema.xsd
+++ b/sysinv/sysinv/sysinv/etc/sysinv/profileSchema.xsd
@@ -331,7 +331,7 @@
-
+
diff --git a/sysinv/sysinv/sysinv/etc/sysinv/sampleProfile.xml b/sysinv/sysinv/sysinv/etc/sysinv/sampleProfile.xml
index e16fe28047..8426e0e756 100644
--- a/sysinv/sysinv/sysinv/etc/sysinv/sampleProfile.xml
+++ b/sysinv/sysinv/sysinv/etc/sysinv/sampleProfile.xml
@@ -79,28 +79,28 @@
-
-
-
-
+
+
+
+
-
-
+
+
-
-
+
+
-
-
+
+
-
-
+
+
-
+
@@ -187,7 +187,7 @@ instance_backing: lvm, image, or remote
     concurrent_disk_operations: number of parallel I/O intensive disk operaions
 -->
-
+
diff --git a/sysinv/sysinv/sysinv/etc/sysinv/sysinv.conf b/sysinv/sysinv/sysinv/etc/sysinv/sysinv.conf
index 1db828b398..b509eba512 100644
--- a/sysinv/sysinv/sysinv/etc/sysinv/sysinv.conf
+++ b/sysinv/sysinv/sysinv/etc/sysinv/sysinv.conf
@@ -5,9 +5,10 @@ log_file=sysinv.log
 log_dir=/var/log/sysinv

 [journal]
-journal_max_size=51200
-journal_min_size=1024
-journal_default_size=1024
+#Journal size in GiB
+journal_max_size=50
+journal_min_size=1
+journal_default_size=1

 [database]
 connection=postgresql://cgts:cgtspwd@localhost/cgtsdb:
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py
index 865a8a5aa7..ddd0a883ff 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py
@@ -5,6 +5,7 @@
 #

 import jsonpatch
+import math
 import re
 import six

@@ -359,15 +360,16 @@ def _partition_pre_patch_checks(partition_obj, patch_obj):
             if not cutils.is_int_like(p['value']):
                 raise wsme.exc.ClientSideError(
                     _("Requested partition size must be an integer "
-                      "greater than 0: %s") % p['value'])
+                      "greater than 0: %s GiB") % p['value'])
             if int(p['value']) <= 0:
                 raise wsme.exc.ClientSideError(
                     _("Requested partition size must be an integer "
-                      "greater than 0: %s") % p['value'])
+                      "greater than 0: %s GiB") % (int(p['value']) / 1024))
             if int(p['value']) <= partition_obj.size_mib:
                 raise wsme.exc.ClientSideError(
                     _("Requested partition size must be larger than current "
-                      "size: %s <= %s") % (p['value'], partition_obj.size_mib))
+                      "size: %s GiB <= %s GiB") % (int(p['value']) / 1024,
+                      math.floor(float(partition_obj.size_mib) / 1024 * 1000) / 1000.0))


 def _is_user_created_partition(guid):
@@ -487,8 +489,9 @@ def _semantic_checks(operation, partition):
         # partition.
         if not _enough_avail_space_on_disk(partition.get('size_mib'), idisk):
             raise wsme.exc.ClientSideError(
-                _("Requested size %s MiB is larger than the %s MiB "
-                  "available.") % (partition['size_mib'],idisk.available_mib))
+                _("Requested size %s GiB is larger than the %s GiB "
+                  "available.") % (partition['size_mib'] / 1024,
+                  math.floor(float(idisk.available_mib) / 1024 * 1000) / 1000.0))

         _are_partition_operations_simultaneous(ihost, partition,
                                                constants.PARTITION_CMD_CREATE)
@@ -566,8 +569,9 @@
             # Check if there is enough space to enlarge the partition.
             if not _enough_avail_space_on_disk(extra_size, idisk):
                 raise wsme.exc.ClientSideError(
-                    _("Requested extra size %s MiB is larger than the %s MiB "
-                      "available.") % (extra_size,idisk.available_mib))
+                    _("Requested extra size %s GiB is larger than the %s GiB "
+                      "available.") % (extra_size / 1024,
+                      math.floor(float(idisk.available_mib) / 1024 * 1000) / 1000.0))

     elif operation == constants.PARTITION_CMD_DELETE:
         ############
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py
index 3324c40e55..cea36f854a 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py
@@ -1599,8 +1599,9 @@ def _create_storage_profile(profile_name, profile_node):
     for disk in disks:
         dev_path = disk.get('path')
         dev_func = disk.get('volumeFunc')
-        dev_size = int(disk.get('size'))
-        journal_size = int(disk.get('journalSize', '0'))
+        # Convert from GiB to MiB
+        dev_size = int(disk.get('size')) * 1024
+        journal_size = int(disk.get('journalSize', '0')) * 1024
         tier = disk.get('tier', constants.SB_TIER_DEFAULT_NAMES[
             constants.SB_TIER_TYPE_CEPH])
         if not dev_path:
@@ -1623,10 +1624,10 @@ def _create_storage_profile(profile_name, profile_node):
                     ' is invalid') % profile_name, \
                     _('device path %(dev)s journal size of %(size)s'
                       ' is invalid.') % {'dev': dev_path,
-                                         'size': journal_size}, \
+                                         'size': journal_size / 1024}, \
                     _('size should be between %(min)s and '
-                      ' %(max)s.') % {'min': CONF.journal.journal_min_size,
-                                      'max': CONF.journal.journal_max_size}
+                      ' %(max)s.') % {'min': CONF.journal.journal_min_size / 1024,
+                                      'max': CONF.journal.journal_max_size / 1024}

             if dev_func == constants.STOR_FUNCTION_JOURNAL:
                 journal_disks.append(dev_path)
@@ -1672,7 +1673,8 @@ def _create_storage_profile(profile_name, profile_node):
             dev_func = disk.get('volumeFunc')
             if dev_func == constants.STOR_FUNCTION_JOURNAL:
                 dev_path = disk.get('path')
-                dev_size = int(disk.get('size'))
+                # Convert disk size from GiB to MiB
+                dev_size = int(disk.get('size')) * 1024
                 ddict = {'device_path': dev_path,
                          'size_mib': dev_size,
                          'forihostid': profile_id,
@@ -1690,7 +1692,8 @@ def _create_storage_profile(profile_name, profile_node):
     for disk in disks:
         dev_path = disk.get('path')
         dev_func = disk.get('volumeFunc')
-        dev_size = int(disk.get('size'))
+        # convert disk size from GiB to MiB
+        dev_size = int(disk.get('size')) * 1024
         tier = disk.get('tier', constants.SB_TIER_DEFAULT_NAMES[
             constants.SB_TIER_TYPE_CEPH])
@@ -1706,8 +1709,10 @@ def _create_storage_profile(profile_name, profile_node):
                 default_size = CONF.journal.journal_default_size
                 if len(journals) > 0:
                     # we don't expect collocated journals
-                    journal_size = disk.get('journalSize',
-                                            default_size)
+                    if disk.get('journalSize'):
+                        journal_size = int(disk.get('journalSize')) * 1024
+                    else:
+                        journal_size = default_size
                     sdict['journal_size_mib'] = journal_size
                     if len(journals) > 1:
                         # multiple journal disks are available, use
@@ -1774,7 +1779,8 @@ def _create_localstorage_profile(profile_name, profile_node):

     for disk in disks:
         dev_path = disk.get('path')
-        dev_size = int(disk.get('size'))
+        # Convert disk size from GiB to MiB
+        dev_size = int(disk.get('size')) * 1024
         dev_func = disk.get('volumeFunc')

         if dev_func and dev_func in prohibitedFuncs:
@@ -1808,7 +1814,8 @@ def _create_localstorage_profile(profile_name, profile_node):
         instance_backing = ilvg.get(constants.LVG_NOVA_PARAM_BACKING)
         concurrent_disk_operations = ilvg.get(constants.LVG_NOVA_PARAM_DISK_OPS)

         if instance_backing == constants.LVG_NOVA_BACKING_LVM:
-            instances_lv_size_mib = ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ)
+            instances_lv_size_mib = \
+                int(ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB) or 0) * 1024
             if not instances_lv_size_mib:
                 return ("Error",
                         _('error: importing Local Storage profile %s '
                           'failed.') %
@@ -1820,26 +1827,26 @@ def _create_localstorage_profile(profile_name, profile_node):
                                  constants.LVG_NOVA_PARAM_DISK_OPS:
                                  int(concurrent_disk_operations)}
         elif instance_backing == constants.LVG_NOVA_BACKING_IMAGE:
-            instances_lv_size_mib = ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ)
-            if instances_lv_size_mib:
+            if ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB):
                 return ("Error",
                         _('error: Local Storage profile %s is invalid')
                         % profile_name,
-                        _('instances_lv_size_mib (%s) must not be set for '
-                          'image backed instance') % instances_lv_size_mib)
+                        _('instances_lv_size_gib (%s) must not be set for '
+                          'image backed instance')
+                        % ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB))

             capabilities_dict = {constants.LVG_NOVA_PARAM_BACKING:
                                  constants.LVG_NOVA_BACKING_IMAGE,
                                  constants.LVG_NOVA_PARAM_DISK_OPS:
                                  int(concurrent_disk_operations)}
         elif instance_backing == constants.LVG_NOVA_BACKING_REMOTE:
-            instances_lv_size_mib = ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ)
-            if instances_lv_size_mib:
+            if ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB):
                 return ("Error",
                         _('error: Local Storage profile %s is invalid')
                         % profile_name,
-                        _('instances_lv_size_mib (%s) must not be set for '
-                          'remote backed instance') % instances_lv_size_mib)
+                        _('instances_lv_size_gib (%s) must not be set for '
+                          'remote backed instance')
+                        % ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB))

             capabilities_dict = {constants.LVG_NOVA_PARAM_BACKING:
                                  constants.LVG_NOVA_BACKING_REMOTE,
@@ -1860,7 +1867,7 @@ def _create_localstorage_profile(profile_name, profile_node):

     for disk in disks:
         dev_path = disk.get('path')
-        dev_size = int(disk.get('size'))
+        dev_size = int(disk.get('size')) * 1024

         ddict = {'device_path': dev_path,
                  'size_mib': dev_size,
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_lvm.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_lvm.py
index 4634d181d0..63be395a4b 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_lvm.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_lvm.py
@@ -21,6 +21,7 @@ import jsonpatch

 import copy
+import math
 from oslo_serialization import jsonutils

@@ -386,20 +387,20 @@
     if len(valid_ctrls) == 2:
         if pv_sizes[0]['size'] != pv_sizes[1]['size']:
             msg = (_('Allocated storage for %s PVs must be equal and greater than '
-                     '%s MiB on both controllers. Allocation for %s is %s MiB '
-                     'while for %s is %s MiB.') %
+                     '%s GiB on both controllers. Allocation for %s is %s GiB '
+                     'while for %s is %s GiB.') %
                    (constants.LVG_CINDER_VOLUMES,
-                    constants.CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB * 1024,
-                    pv_sizes[0]['host'], pv_sizes[0]['size'],
-                    pv_sizes[1]['host'], pv_sizes[1]['size']))
+                    constants.CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB,
+                    pv_sizes[0]['host'], math.floor(float(pv_sizes[0]['size']) / 1024 * 1000) / 1000.0,
+                    pv_sizes[1]['host'], math.floor(float(pv_sizes[1]['size']) / 1024 * 1000) / 1000.0))
             raise wsme.exc.ClientSideError(msg)

     if pv_sizes[0]['size'] < (constants.CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB * 1024):
-        msg = (_('Minimum allocated storage for %s PVs is: %s MiB. '
-                 'Current allocation is: %s MiB.') %
+        msg = (_('Minimum allocated storage for %s PVs is: %s GiB. '
+                 'Current allocation is: %s GiB.') %
               (constants.LVG_CINDER_VOLUMES,
-               constants.CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB * 1024,
-               pv_sizes[0]['size']))
+               constants.CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB,
+               math.floor(float(pv_sizes[0]['size']) / 1024 * 1000) / 1000.0))
         raise wsme.exc.ClientSideError(msg)

     # Log all the LVM parameters
diff --git a/sysinv/sysinv/sysinv/sysinv/common/constants.py b/sysinv/sysinv/sysinv/sysinv/common/constants.py
index 34d4da727a..a9d3e5f5d8 100644
--- a/sysinv/sysinv/sysinv/sysinv/common/constants.py
+++ b/sysinv/sysinv/sysinv/sysinv/common/constants.py
@@ -474,6 +474,7 @@ PV_NAME_UNKNOWN = 'unknown'
 # Storage: Volume Group Parameter Types
 LVG_NOVA_PARAM_BACKING = 'instance_backing'
 LVG_NOVA_PARAM_INST_LV_SZ = 'instances_lv_size_mib'
+LVG_NOVA_PARAM_INST_LV_SZ_GIB = 'instances_lv_size_gib'
 LVG_NOVA_PARAM_DISK_OPS = 'concurrent_disk_operations'
 LVG_CINDER_PARAM_LVM_TYPE = 'lvm_type'
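
Note on the display-side conversion repeated throughout the cgtsclient changes above: the expression math.floor(float(size_mib) / 1024 * 1000) / 1000.0 turns a MiB value into GiB truncated to three decimal places, so the size shown to the user never rounds up past the real size. A minimal standalone sketch of that behaviour (the helper name mib_to_gib is illustrative only; the patch inlines the expression rather than defining a helper):

    import math

    def mib_to_gib(size_mib):
        # MiB -> GiB, rounded down to 3 decimal places so the displayed
        # value never exceeds the actual size.
        return math.floor(float(size_mib) / 1024 * 1000) / 1000.0

    # 10000 MiB is 9.765625 GiB; truncation shows 9.765 rather than 9.766.
    assert mib_to_gib(10000) == 9.765
    assert mib_to_gib(51200) == 50.0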
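On the input side, the shell commands above now accept whole GiB from the user, validate the value as an integer, and multiply by 1024 before the request is sent, because the REST API and database fields (size_mib, journal_size_mib, instances_lv_size_mib) stay MiB-based. A rough sketch of that validate-and-convert pattern; parse_size_gib is a hypothetical name, not a function added by this change, and a plain ValueError stands in for the client's CommandError:

    def parse_size_gib(value):
        # Validate a user-supplied GiB string and return the MiB value
        # expected by the sysinv API.
        try:
            size_gib = int(value)
        except ValueError:
            raise ValueError('size must be an integer greater than 0: %s' % value)
        if size_gib <= 0:
            raise ValueError('size must be an integer greater than 0: %s' % value)
        return size_gib * 1024

    # Example: '--journal-size 50' now means 50 GiB and is sent as 51200 MiB.
    assert parse_size_gib('50') == 51200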