Use GiB for storage-related size display

This commit delivers the code changes in cgcsclient and sysinv that unify
storage-related size display and input (e.g. disk, partition, lvg, journal)
to use GiB. It also updates lab_setup.conf for all the labs, because the
partition and lvg size inputs are now given in GiB instead of MiB.

Change-Id: I7a9c88a02ce8bdd4ee43141cd149bb82b68e6e9d
Wei Zhou 2018-04-15 11:25:06 -04:00 committed by Jack Ding
parent d8c8412cb4
commit 3f86a35665
13 changed files with 211 additions and 114 deletions
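
Most of the client-side hunks below apply the same display conversion:
divide the MiB value by 1024 and keep three decimal places, always rounding
down. A minimal sketch of that pattern (the helper name is hypothetical,
not part of this commit), using an assumed 230000 MiB disk:

    import math

    def mib_to_gib_floor(size_mib):
        # MiB -> GiB, truncated to three decimals rather than rounded,
        # so the displayed size never overstates the real one.
        return math.floor(float(size_mib) / 1024 * 1000) / 1000.0

    print(mib_to_gib_floor(230000))  # 224.609 (230000 / 1024 = 224.609375)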

View File

@ -14,6 +14,7 @@ from cgtsclient.common import constants
from cgtsclient.common import utils
from cgtsclient import exc
from cgtsclient.v1 import ihost as ihost_utils
import math
def _print_idisk_show(idisk):
@ -22,7 +23,7 @@ def _print_idisk_show(idisk):
'ihost_uuid', 'istor_uuid', 'ipv_uuid', 'created_at',
'updated_at']
labels = ['device_node', 'device_num', 'device_type', 'device_path',
'size_mib', 'available_mib', 'rpm', 'serial_id', 'uuid',
'size_gib', 'available_gib', 'rpm', 'serial_id', 'uuid',
'ihost_uuid', 'istor_uuid', 'ipv_uuid', 'created_at',
'updated_at']
data = [(f, getattr(idisk, f, '')) for f in fields]
@ -50,6 +51,11 @@ def do_host_disk_show(cc, args):
"""Show disk attributes."""
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
idisk = _find_disk(cc, ihost, args.device_nodeoruuid)
# Convert size from MiB to GiB and round it down
idisk.size_mib = math.floor(float(idisk.size_mib) / 1024 * 1000) / 1000.0
idisk.available_mib = math.floor(float(idisk.available_mib) / 1024 * 1000) / 1000.0
_print_idisk_show(idisk)
@ -64,12 +70,17 @@ def do_host_disk_list(cc, args):
idisks = cc.idisk.list(ihost.uuid)
field_labels = ['uuid', 'device_node', 'device_num', 'device_type',
'size_mib', 'available_mib', 'rpm', 'serial_id',
'size_gib', 'available_gib', 'rpm', 'serial_id',
'device_path']
fields = ['uuid', 'device_node', 'device_num', 'device_type',
'size_mib', 'available_mib', 'rpm', 'serial_id',
'device_path']
# Convert size from MiB to GiB and round it down
for i in idisks:
i.size_mib = math.floor(float(i.size_mib) / 1024 * 1000) / 1000.0
i.available_mib = math.floor(float(i.available_mib) / 1024 * 1000) / 1000.0
utils.print_list(idisks, fields, field_labels, sortby=1)
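
The comments above say the value is rounded down rather than rounded;
presumably this keeps the listing from ever showing more space than the
disk actually has. A short sketch of the difference, with an assumed
999 MiB size:

    import math

    size_mib = 999
    print(math.floor(float(size_mib) / 1024 * 1000) / 1000.0)  # 0.975, floored
    print(round(float(size_mib) / 1024, 3))                    # 0.976, would overstate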

View File

@ -18,6 +18,11 @@ from oslo_serialization import jsonutils
def _print_ilvg_show(ilvg):
labels = ['lvm_vg_name', 'vg_state', 'uuid', 'ihost_uuid', 'lvm_vg_access',
'lvm_max_lv', 'lvm_cur_lv', 'lvm_max_pv', 'lvm_cur_pv',
'lvm_vg_size_gib', 'lvm_vg_total_pe', 'lvm_vg_free_pe', 'created_at',
'updated_at', 'parameters']
fields = ['lvm_vg_name', 'vg_state', 'uuid', 'ihost_uuid', 'lvm_vg_access',
'lvm_max_lv', 'lvm_cur_lv', 'lvm_max_pv', 'lvm_cur_pv',
'lvm_vg_size', 'lvm_vg_total_pe', 'lvm_vg_free_pe', 'created_at',
@ -32,7 +37,7 @@ def _print_ilvg_show(ilvg):
# rename capabilities for display purposes and add to display list
data.append(('parameters', getattr(ilvg, 'capabilities', '')))
utils.print_tuple_list(data)
utils.print_tuple_list(data, labels)
def _find_lvg(cc, ihost, lvguuid):
@ -171,11 +176,11 @@ def do_host_lvg_delete(cc, args):
help=("Set the number of concurrent I/O intensive disk operations "
"such as glance image downloads, image format conversions, "
"etc. [nova-local]"))
@utils.arg('-s', '--instances_lv_size_mib',
metavar='<instances_lv size in MiB>',
help=("Set the desired size (in MiB) of the instances LV that is "
@utils.arg('-s', '--instances_lv_size_gib',
metavar='<instances_lv size in GiB>',
help=("Set the desired size (in GiB) of the instances LV that is "
"used for /etc/nova/instances. Example: For a 50GB volume, "
"use 51200. Required when instance backing is \"lvm\". "
"use 50. Required when instance backing is \"lvm\". "
"[nova-local]"))
@utils.arg('-l', '--lvm_type',
metavar='<lvm_type>',
@ -187,22 +192,29 @@ def do_host_lvg_modify(cc, args):
# Get all the fields from the command arguments
field_list = ['hostnameorid', 'lvgnameoruuid',
'instance_backing', 'instances_lv_size_mib',
'instance_backing', 'instances_lv_size_gib',
'concurrent_disk_operations', 'lvm_type']
fields = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
all_caps_list = ['instance_backing', 'instances_lv_size_mib',
all_caps_list = ['instance_backing', 'instances_lv_size_gib',
'concurrent_disk_operations', 'lvm_type']
integer_fields = ['instances_lv_size_mib', 'concurrent_disk_operations']
integer_fields = ['instances_lv_size_gib', 'concurrent_disk_operations']
requested_caps_dict = {}
for cap in all_caps_list:
if cap in fields:
if cap in integer_fields:
requested_caps_dict[cap] = int(fields[cap])
else:
requested_caps_dict[cap] = fields[cap]
try:
if cap in integer_fields:
requested_caps_dict[cap] = int(fields[cap])
else:
requested_caps_dict[cap] = fields[cap]
if cap == 'instances_lv_size_gib':
requested_caps_dict['instances_lv_size_mib'] = \
requested_caps_dict.pop('instances_lv_size_gib') * 1024
except ValueError:
raise exc.CommandError('instances_lv size must be an integer '
'greater than 0: %s' % fields[cap])
# Get the ihost object
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
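
The net effect of the block above: the CLI now accepts an integer GiB
value, but the REST API still carries instances_lv_size_mib, so the value
is multiplied by 1024 before it is sent (matching the updated help text:
a 50 GB volume is entered as 50, not 51200). A sketch with assumed input
values:

    # e.g. system host-lvg-modify -s 50 compute-0 nova-local (assumed args)
    fields = {'instances_lv_size_gib': '50'}
    caps = {'instances_lv_size_mib': int(fields['instances_lv_size_gib']) * 1024}
    print(caps)  # {'instances_lv_size_mib': 51200}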

View File

@ -17,6 +17,7 @@ from cgtsclient.v1 import ethernetport as ethernetport_utils
from cgtsclient.v1 import icpu as icpu_utils
from cgtsclient.v1 import ihost as ihost_utils
from cgtsclient.v1 import iprofile as iprofile_utils
import math
#
# INTERFACE PROFILES
@ -311,7 +312,7 @@ def get_storconfig_detailed(iprofile):
if stor.function == 'journal' and count > 1:
str += " %s" % journals[stor.uuid]
if stor.function == 'osd':
str += ", ceph journal: size %s MiB, " % stor.journal_size_mib
str += ", ceph journal: size %s GiB, " % (stor.journal_size_mib / 1024)
if stor.journal_location == stor.uuid:
str += "collocated on osd stor"
else:
@ -329,7 +330,7 @@ def get_diskconfig(iprofile):
for disk in iprofile.disks:
if str != '':
str = str + "; "
str = str + "%s: %s" % (disk.device_path, disk.size_mib)
str = str + "%s: %s GiB" % (disk.device_path, math.floor(float(disk.size_mib) / 1024 * 1000) / 1000.0)
if not disk.device_path:
invalid_profile = True
return str, invalid_profile
@ -340,7 +341,7 @@ def get_partconfig(iprofile):
for part in iprofile.partitions:
if str != '':
str = str + "; "
str = str + "%s: %s" % (part.device_path, part.size_mib)
str = str + "%s: %s GiB" % (part.device_path, math.floor(float(part.size_mib) / 1024 * 1000) / 1000.0)
return str
@ -354,6 +355,9 @@ def get_ilvg_config(iprofile):
for k, v in ilvg.capabilities.iteritems():
if capabilities_str != '':
capabilities_str += "; "
if k == "instances_lv_size_mib":
k = "instances_lv_size_gib"
v = v / 1024
capabilities_str += "%s: %s " % (k, v)
str += "%s, %s" % (ilvg.lvm_vg_name, capabilities_str)

View File

@ -15,16 +15,23 @@ from cgtsclient.v1 import idisk as idisk_utils
from cgtsclient.v1 import ihost as ihost_utils
from cgtsclient.v1 import ilvg as ilvg_utils
from cgtsclient.v1 import partition as partition_utils
import math
def _print_ipv_show(ipv):
labels = ['uuid', 'pv_state', 'pv_type', 'disk_or_part_uuid',
'disk_or_part_device_node', 'disk_or_part_device_path',
'lvm_pv_name', 'lvm_vg_name', 'lvm_pv_uuid',
'lvm_pv_size_gib', 'lvm_pe_total', 'lvm_pe_alloced', 'ihost_uuid',
'created_at', 'updated_at']
fields = ['uuid', 'pv_state', 'pv_type', 'disk_or_part_uuid',
'disk_or_part_device_node', 'disk_or_part_device_path',
'lvm_pv_name', 'lvm_vg_name', 'lvm_pv_uuid',
'lvm_pv_size', 'lvm_pe_total', 'lvm_pe_alloced', 'ihost_uuid',
'created_at', 'updated_at']
ipv.lvm_pv_size = math.floor(float(ipv.lvm_pv_size) / (1024 ** 3) * 1000) / 1000.0
data = [(f, getattr(ipv, f, '')) for f in fields]
utils.print_tuple_list(data)
utils.print_tuple_list(data, labels)
def _find_pv(cc, ihost, pvuuid):
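
Note the divisor above is 1024 ** 3 rather than 1024: judging by that
divisor, lvm_pv_size is reported in bytes, not MiB, so it needs a full
bytes-to-GiB conversion before display. A sketch with an assumed value:

    import math

    lvm_pv_size = 240057409536  # bytes (assumed)
    print(math.floor(float(lvm_pv_size) / (1024 ** 3) * 1000) / 1000.0)  # 223.57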

View File

@ -20,8 +20,12 @@ def _print_istor_show(istor):
'journal_size_mib', 'journal_path', 'journal_node',
'uuid', 'ihost_uuid', 'idisk_uuid', 'tier_uuid', 'tier_name',
'created_at', 'updated_at']
labels = ['osdid', 'function', 'journal_location',
'journal_size_gib', 'journal_path', 'journal_node',
'uuid', 'ihost_uuid', 'idisk_uuid', 'tier_uuid', 'tier_name',
'created_at', 'updated_at']
data = [(f, getattr(istor, f, '')) for f in fields]
utils.print_tuple_list(data)
utils.print_tuple_list(data, labels)
def _find_stor(cc, ihost, storuuid):
@ -50,6 +54,10 @@ def do_host_stor_show(cc, args):
i = cc.istor.get(args.storuuid)
# Convert journal size from MiB to GiB for display
if i.journal_size_mib:
i.journal_size_mib = i.journal_size_mib / 1024
_print_istor_show(i)
@ -64,9 +72,13 @@ def do_host_stor_list(cc, args):
for i in istors:
istor_utils._get_disks(cc, ihost, i)
# Convert journal size from MiB to GiB for display
if i.journal_size_mib:
i.journal_size_mib = i.journal_size_mib / 1024
field_labels = ['uuid', 'function', 'osdid', 'capabilities',
'idisk_uuid', 'journal_path', 'journal_node',
'journal_size_mib', 'tier_name']
'journal_size_gib', 'tier_name']
fields = ['uuid', 'function', 'osdid', 'capabilities',
'idisk_uuid', 'journal_path', 'journal_node', 'journal_size_mib',
'tier_name']
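
Unlike the disk conversions, the journal value is converted with plain
integer division, and the istor object keeps its journal_size_mib
attribute name; only the printed label changes to journal_size_gib. The
division is exact as long as journal sizes are whole-GiB multiples of
1024 MiB, which is what stor add/update below now store. A sketch with an
assumed stored value:

    journal_size_mib = 2048                 # assumed: 2 GiB * 1024
    print(journal_size_mib / 1024)          # 2 (Python 2 integer division)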
@ -91,10 +103,10 @@ def do_host_stor_list(cc, args):
default=None,
help="Location of stor's journal")
@utils.arg('--journal-size',
metavar='<size of the journal (MiB)>',
metavar='<size of the journal (GiB)>',
nargs='?',
default=None,
help="Size of stor's journal, in MiB")
help="Size of stor's journal, in GiB")
@utils.arg('--tier-uuid',
metavar='<storage tier uuid>',
nargs='?',
@ -105,18 +117,22 @@ def do_host_stor_add(cc, args):
field_list = ['function', 'idisk_uuid', 'journal_location', 'journal_size',
'tier_uuid']
# default values, name comes from 'osd add'
fields = {'function': 'osd'}
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
integer_fields = ['journal_size']
user_specified_fields = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
for f in user_specified_fields:
try:
if f in integer_fields:
user_specified_fields[f] = int(user_specified_fields[f])
except ValueError:
raise exc.CommandError('Journal size must be an integer '
'greater than 0: %s' % user_specified_fields[f])
if 'journal_size' in user_specified_fields.keys():
user_specified_fields['journal_size_mib'] = \
user_specified_fields.pop('journal_size')
user_specified_fields.pop('journal_size') * 1024
if 'function' in user_specified_fields.keys():
user_specified_fields['function'] = \
@ -126,8 +142,13 @@ def do_host_stor_add(cc, args):
user_specified_fields['tier_uuid'] = \
user_specified_fields['tier_uuid'].replace(" ", "")
# default values, name comes from 'osd add'
fields = {'function': 'osd'}
fields.update(user_specified_fields)
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
try:
fields['ihost_uuid'] = ihost.uuid
istor = cc.istor.create(**fields)
@ -162,13 +183,22 @@ def do_host_stor_update(cc, args):
"""Modify journal attributes for OSD."""
field_list = ['function', 'idisk_uuid', 'journal_location', 'journal_size']
integer_fields = ['journal_size']
user_specified_fields = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
for f in user_specified_fields:
try:
if f in integer_fields:
user_specified_fields[f] = int(user_specified_fields[f])
except ValueError:
raise exc.CommandError('Journal size must be an integer '
'greater than 0: %s' % user_specified_fields[f])
if 'journal_size' in user_specified_fields.keys():
user_specified_fields['journal_size_mib'] = \
user_specified_fields.pop('journal_size')
user_specified_fields.pop('journal_size') * 1024
patch = []
for (k, v) in user_specified_fields.items():

View File

@ -16,6 +16,7 @@ from cgtsclient import exc
from cgtsclient.v1 import idisk as idisk_utils
from cgtsclient.v1 import ihost as ihost_utils
from cgtsclient.v1 import partition as part_utils
import math
PARTITION_MAP = {'lvm_phys_vol': constants.USER_PARTITION_PHYSICAL_VOLUME}
@ -76,8 +77,10 @@ def do_host_disk_partition_list(cc, args):
for p in ipartitions:
p.status = constants.PARTITION_STATUS_MSG[p.status]
p.size_mib = math.floor(float(p.size_mib) / 1024 * 1000) / 1000.0
field_labels = ['uuid', 'device_path', 'device_node', 'type_guid',
'type_name', 'size_mib', 'status']
'type_name', 'size_gib', 'status']
fields = ['uuid', 'device_path', 'device_node', 'type_guid', 'type_name',
'size_mib', 'status']
@ -90,9 +93,9 @@ def do_host_disk_partition_list(cc, args):
@utils.arg('disk_path_or_uuid',
metavar='<disk path or uuid>',
help="UUID of the disk to place the partition [REQUIRED]")
@utils.arg('size_mib',
metavar='<partition size in MiB>',
help="Requested size of the new partition in MiB [REQUIRED]")
@utils.arg('size_gib',
metavar='<partition size in GiB>',
help="Requested size of the new partition in GiB [REQUIRED]")
@utils.arg('-t', '--partition_type',
metavar='<partition type>',
choices=['lvm_phys_vol'],
@ -102,8 +105,22 @@ def do_host_disk_partition_list(cc, args):
def do_host_disk_partition_add(cc, args):
"""Add a disk partition to a disk of a specified host."""
field_list = ['size_mib', 'partition_type']
integer_fields = ['size_mib']
field_list = ['size_gib', 'partition_type']
integer_fields = ['size_gib']
user_fields = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
for f in user_fields:
try:
if f in integer_fields:
user_fields[f] = int(user_fields[f])
except ValueError:
raise exc.CommandError('Partition size must be an integer '
'greater than 0: %s' % user_fields[f])
# Convert size from GiB to MiB
user_fields['size_mib'] = user_fields.pop('size_gib') * 1024
# Get the ihost object
ihost = ihost_utils._find_ihost(cc, args.hostname_or_id)
@ -117,17 +134,6 @@ def do_host_disk_partition_add(cc, args):
'idisk_uuid': idisk.uuid,
'size_mib': 0}
user_fields = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
for f in user_fields:
try:
if f in integer_fields:
user_fields[f] = int(user_fields[f])
except ValueError:
raise exc.CommandError('Partition size must be an integer '
'greater than 0: %s' % user_fields[f])
fields.update(user_fields)
# Set the requested partition GUID
@ -184,14 +190,16 @@ def do_host_disk_partition_delete(cc, args):
@utils.arg('partition_path_or_uuid',
metavar='<partition path or uuid>',
help="UUID of the partition [REQUIRED]")
@utils.arg('-s', '--size_mib',
metavar='<partition size in MiB>',
@utils.arg('-s', '--size_gib',
metavar='<partition size in GiB>',
help=("Update the desired size of the partition"))
def do_host_disk_partition_modify(cc, args):
"""Modify the attributes of a Disk Partition."""
# Get all the fields from the command arguments
field_list = ['size_mib']
field_list = ['size_gib']
integer_fields = ['size_gib']
user_specified_fields = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
@ -199,6 +207,17 @@ def do_host_disk_partition_modify(cc, args):
raise exc.CommandError('No update parameters specified, '
'partition is unchanged.')
for f in user_specified_fields:
try:
if f in integer_fields:
user_specified_fields[f] = int(user_specified_fields[f])
except ValueError:
raise exc.CommandError('Partition size must be an integer '
'greater than 0: %s' % user_specified_fields[f])
# Convert size from GiB to MiB
user_specified_fields['size_mib'] = user_specified_fields.pop('size_gib') * 1024
# Get the ihost object
ihost = ihost_utils._find_ihost(cc, args.hostname_or_id)
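
A sketch of the modify path above with assumed values: growing a 1 GiB
partition to 2 GiB. The CLI converts the GiB argument back to MiB, and
the server-side check in partition.py (below) then requires the new
size_mib to exceed the current one:

    current_size_mib = 1024                     # assumed existing partition
    user_specified_fields = {'size_gib': '2'}   # from --size_gib 2
    size_mib = int(user_specified_fields.pop('size_gib')) * 1024
    assert size_mib > current_size_mib          # 2048 > 1024, accepted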

View File

@ -331,7 +331,7 @@
<xs:extension base="xs:string">
<xs:attribute type="Lvm_vg_name" name="lvm_vg_name" use="required" />
<xs:attribute type="Instance_backing" name="instance_backing" use="required" />
<xs:attribute type="xs:positiveInteger" name="instances_lv_size_mib" use="optional" />
<xs:attribute type="xs:positiveInteger" name="instances_lv_size_gib" use="optional" />
<xs:attribute type="xs:positiveInteger" name="concurrent_disk_operations" use="required" />
</xs:extension>
</xs:simpleContent>

View File

@ -79,28 +79,28 @@
<storageProfile name="storage-profile">
<!--The disk tags below define each device,
path: device path
size: minimum size (in MB)
size: minimum size (in GiB)
volumeFunc: volume function to be assigned to the device
For 'osd' function:
journalSize: the size of the ceph journal, if absent defaults to journal_default_size in sysinv.conf
journalSize: the size of the ceph journal in GiB, if absent defaults to journal_default_size in sysinv.conf
journalLocation: location of the journal partition, mandatory if multiple journal functions are defined,
if absent defaults to the single available journal drive.
If no device with journal function is configured then the journals for all OSDs will be collocated on the
same device with the OSD data (partition #1 is for the data and partition #2 for the journal).
In this case the size of the journal will be journal_default_size.-->
<disk path="/dev/sdb" size="228936" volumeFunc="osd" journalSize="2048" journalLocation="/dev/sdd" />
<disk path="/dev/sdc" size="228936" volumeFunc="osd" journalLocation="/dev/sde" />
<disk path="/dev/sdd" size="228936" volumeFunc="journal" />
<disk path="/dev/sde" size="228936" volumeFunc="journal" />
<disk path="/dev/sdb" size="223" volumeFunc="osd" journalSize="2" journalLocation="/dev/sdd" />
<disk path="/dev/sdc" size="223" volumeFunc="osd" journalLocation="/dev/sde" />
<disk path="/dev/sdd" size="223" volumeFunc="journal" />
<disk path="/dev/sde" size="223" volumeFunc="journal" />
</storageProfile>
<storageProfile name="storage-profile-coloc-no-tier">
<!--The disk tags below define each device,
path: device path
size: minimum size (in MB)
size: minimum size (in GiB)
volumeFunc: volume function to be assigned to the device
For 'osd' function:
journalSize: the size of the ceph journal, if absent defaults to journal_default_size in sysinv.conf
journalSize: the size of the ceph journal in GiB, if absent defaults to journal_default_size in sysinv.conf
journalLocation: location of the journal partition, mandatory if multiple journal functions are defined,
if absent defaults to the single available journal drive.
tier: storage tier for OSDs. If this is not specified, then the
@ -109,17 +109,17 @@
same device with the OSD data (partition #1 is for the data and partition #2 for the journal).
In this case the size of the journal will be journal_default_size.
-->
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0" size="51200" volumeFunc="osd"/>
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0" size="51200" volumeFunc="osd"/>
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0" size="50" volumeFunc="osd"/>
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0" size="50" volumeFunc="osd"/>
</storageProfile>
<storageProfile name="storage-profile-journal-no-tier">
<!--The disk tags below define each device,
path: device path
size: minimum size (in MB)
size: minimum size (in GiB)
volumeFunc: volume function to be assigned to the device
For 'osd' function:
journalSize: the size of the ceph journal, if absent defaults to journal_default_size in sysinv.conf
journalSize: the size of the ceph journal in GiB, if absent defaults to journal_default_size in sysinv.conf
journalLocation: location of the journal partition, mandatory if multiple journal functions are defined,
if absent defaults to the single available journal drive.
tier: storage tier for OSDs. If this is not specified, then the
@ -128,17 +128,17 @@
same device with the OSD data (partition #1 is for the data and partition #2 for the journal).
In this case the size of the journal will be journal_default_size.
-->
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0" size="51200" volumeFunc="osd"/>
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0" size="51200" volumeFunc="journal"/>
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0" size="50" volumeFunc="osd"/>
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0" size="50" volumeFunc="journal"/>
</storageProfile>
<storageProfile name="storage-profile-coloc-two-tiers">
<!--The disk tags below define each device,
path: device path
size: minimum size (in MB)
size: minimum size (in GiB)
volumeFunc: volume function to be assigned to the device
For 'osd' function:
journalSize: the size of the ceph journal, if absent defaults to journal_default_size in sysinv.conf
journalSize: the size of the ceph journal in GiB, if absent defaults to journal_default_size in sysinv.conf
journalLocation: location of the journal partition, mandatory if multiple journal functions are defined,
if absent defaults to the single available journal drive.
tier: storage tier for OSDs. If this is not specified, then the
@ -147,34 +147,34 @@
same device with the OSD data (partition #1 is for the data and partition #2 for the journal).
In this case the size of the journal will be journal_default_size.
-->
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0" size="51200" volumeFunc="osd" tier="storage"/>
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0" size="51200" volumeFunc="osd" tier="gold"/>
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0" size="50" volumeFunc="osd" tier="storage"/>
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0" size="50" volumeFunc="osd" tier="gold"/>
</storageProfile>
<localstorageProfile name="localstorage-profile_lvm">
<!--The disk tags below define each device,
node: device node
size: minimum size (in MB).
size: minimum size (in GiB).
The lvg tags below define the local volume group
lvm_vg_name: local volume group name
instance_backing: lvm, image, or remote
instances_lv_size_mib: local volume size in MiBytes
instances_lv_size_gib: local volume size in GiB
concurrent_disk_operations: number of parallel I/O intensive disk operations
-->
<disk path="/dev/sdb" size="228936" />
<lvg lvm_vg_name="nova-local" instance_backing="lvm" instances_lv_size_mib="2048" concurrent_disk_operations="2" />
<disk path="/dev/sdb" size="223" />
<lvg lvm_vg_name="nova-local" instance_backing="lvm" instances_lv_size_gib="2" concurrent_disk_operations="2" />
</localstorageProfile>
<localstorageProfile name="localstorage-profile_localimage">
<!--The disk tags below define each device,
node: device node
size: minimum size (in MB).
size: minimum size (in GiB).
The lvg tags below define the local volume group
lvm_vg_name: local volume group name
instance_backing: lvm, image, or remote
concurrent_disk_operations: number of parallel I/O intensive disk operations
-->
<disk path="/dev/sdb" size="228936" />
<disk path="/dev/sdb" size="223" />
<lvg lvm_vg_name="nova-local" instance_backing="image" concurrent_disk_operations="2" />
</localstorageProfile>
@ -187,7 +187,7 @@
instance_backing: lvm, image, or remote
concurrent_disk_operations: number of parallel I/O intensive disk operations
-->
<disk path="/dev/sdb" size="228936" />
<disk path="/dev/sdb" size="223" />
<lvg lvm_vg_name="nova-local" instance_backing="remote" concurrent_disk_operations="2" />
</localstorageProfile>
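
The profile sizes above were converted by rounding down to whole GiB:
the old 228936 MiB disks became size="223" (228936 / 1024 is about
223.57), while the old 51200 MiB disks became size="50" exactly. Since
the import code below multiplies the value back by 1024, a rounded-down
entry re-imports slightly smaller than the original minimum. A quick
check of that arithmetic:

    print(228936 / 1024.0)   # 223.57... -> written as 223 GiB
    print(223 * 1024)        # 228352 MiB after re-import (< 228936)
    print(51200 / 1024)      # 50 -- exact, no loss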

View File

@ -5,9 +5,10 @@ log_file=sysinv.log
log_dir=/var/log/sysinv
[journal]
journal_max_size=51200
journal_min_size=1024
journal_default_size=1024
# Journal sizes in GiB
journal_max_size=50
journal_min_size=1
journal_default_size=1
[database]
connection=postgresql://cgts:cgtspwd@localhost/cgtsdb:

View File

@ -5,6 +5,7 @@
#
import jsonpatch
import math
import re
import six
@ -359,15 +360,16 @@ def _partition_pre_patch_checks(partition_obj, patch_obj):
if not cutils.is_int_like(p['value']):
raise wsme.exc.ClientSideError(
_("Requested partition size must be an integer "
"greater than 0: %s") % p['value'])
"greater than 0: %s GiB") % p['value'] / 1024)
if int(p['value']) <= 0:
raise wsme.exc.ClientSideError(
_("Requested partition size must be an integer "
"greater than 0: %s") % p['value'])
"greater than 0: %s GiB") % p['value'] / 1024)
if int(p['value']) <= partition_obj.size_mib:
raise wsme.exc.ClientSideError(
_("Requested partition size must be larger than current "
"size: %s <= %s") % (p['value'], partition_obj.size_mib))
"size: %s GiB <= %s GiB") % (p['value'] / 1024,
math.floor(float(partition_obj.size_mib) / 1024 * 1000) / 1000.0))
def _is_user_created_partition(guid):
@ -487,8 +489,9 @@ def _semantic_checks(operation, partition):
# partition.
if not _enough_avail_space_on_disk(partition.get('size_mib'), idisk):
raise wsme.exc.ClientSideError(
_("Requested size %s MiB is larger than the %s MiB "
"available.") % (partition['size_mib'],idisk.available_mib))
_("Requested size %s GiB is larger than the %s GiB "
"available.") % (partition['size_mib'] / 1024,
math.floor(float(idisk.available_mib) / 1024 * 1000) / 1000.0))
_are_partition_operations_simultaneous(ihost, partition,
constants.PARTITION_CMD_CREATE)
@ -566,8 +569,9 @@ def _semantic_checks(operation, partition):
# Check if there is enough space to enlarge the partition.
if not _enough_avail_space_on_disk(extra_size, idisk):
raise wsme.exc.ClientSideError(
_("Requested extra size %s MiB is larger than the %s MiB "
"available.") % (extra_size,idisk.available_mib))
_("Requested extra size %s GiB is larger than the %s GiB "
"available.") % (extra_size / 1024,
math.floor(float(idisk.available_mib) / 1024 * 1000) / 1000.0))
elif operation == constants.PARTITION_CMD_DELETE:
############

View File

@ -1599,8 +1599,9 @@ def _create_storage_profile(profile_name, profile_node):
for disk in disks:
dev_path = disk.get('path')
dev_func = disk.get('volumeFunc')
dev_size = int(disk.get('size'))
journal_size = int(disk.get('journalSize', '0'))
# Convert from GiB to MiB
dev_size = int(disk.get('size')) * 1024
journal_size = int(disk.get('journalSize', '0')) * 1024
tier = disk.get('tier', constants.SB_TIER_DEFAULT_NAMES[
constants.SB_TIER_TYPE_CEPH])
if not dev_path:
@ -1623,10 +1624,10 @@ def _create_storage_profile(profile_name, profile_node):
' is invalid') % profile_name, \
_('device path %(dev)s journal size of %(size)s'
' is invalid.') % {'dev': dev_path,
'size': journal_size}, \
'size': journal_size / 1024}, \
_('size should be between %(min)s and '
' %(max)s.') % {'min': CONF.journal.journal_min_size,
'max': CONF.journal.journal_max_size}
' %(max)s.') % {'min': CONF.journal.journal_min_size / 1024,
'max': CONF.journal.journal_max_size / 1024}
if dev_func == constants.STOR_FUNCTION_JOURNAL:
journal_disks.append(dev_path)
@ -1672,7 +1673,8 @@ def _create_storage_profile(profile_name, profile_node):
dev_func = disk.get('volumeFunc')
if dev_func == constants.STOR_FUNCTION_JOURNAL:
dev_path = disk.get('path')
dev_size = int(disk.get('size'))
# Convert disk size from GiB to MiB
dev_size = int(disk.get('size')) * 1024
ddict = {'device_path': dev_path,
'size_mib': dev_size,
'forihostid': profile_id,
@ -1690,7 +1692,8 @@ def _create_storage_profile(profile_name, profile_node):
for disk in disks:
dev_path = disk.get('path')
dev_func = disk.get('volumeFunc')
dev_size = int(disk.get('size'))
# Convert disk size from GiB to MiB
dev_size = int(disk.get('size')) * 1024
tier = disk.get('tier', constants.SB_TIER_DEFAULT_NAMES[
constants.SB_TIER_TYPE_CEPH])
@ -1706,8 +1709,10 @@ def _create_storage_profile(profile_name, profile_node):
default_size = CONF.journal.journal_default_size
if len(journals) > 0:
# we don't expect collocated journals
journal_size = disk.get('journalSize',
default_size)
if disk.get('journalSize'):
journal_size = int(disk.get('journalSize')) * 1024
else:
journal_size = default_size
sdict['journal_size_mib'] = journal_size
if len(journals) > 1:
# multiple journal disks are available, use
@ -1774,7 +1779,8 @@ def _create_localstorage_profile(profile_name, profile_node):
for disk in disks:
dev_path = disk.get('path')
dev_size = int(disk.get('size'))
# Convert disk size from GiB to MiB
dev_size = int(disk.get('size')) * 1024
dev_func = disk.get('volumeFunc')
if dev_func and dev_func in prohibitedFuncs:
@ -1808,7 +1814,8 @@ def _create_localstorage_profile(profile_name, profile_node):
instance_backing = ilvg.get(constants.LVG_NOVA_PARAM_BACKING)
concurrent_disk_operations = ilvg.get(constants.LVG_NOVA_PARAM_DISK_OPS)
if instance_backing == constants.LVG_NOVA_BACKING_LVM:
instances_lv_size_mib = ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ)
instances_lv_size_mib = \
int(ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB)) * 1024
if not instances_lv_size_mib:
return ("Error", _('error: importing Local Storage profile %s '
'failed.') %
@ -1820,26 +1827,26 @@ def _create_localstorage_profile(profile_name, profile_node):
constants.LVG_NOVA_PARAM_DISK_OPS:
int(concurrent_disk_operations)}
elif instance_backing == constants.LVG_NOVA_BACKING_IMAGE:
instances_lv_size_mib = ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ)
if instances_lv_size_mib:
if ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB):
return ("Error",
_('error: Local Storage profile %s is invalid')
% profile_name,
_('instances_lv_size_mib (%s) must not be set for '
'image backed instance') % instances_lv_size_mib)
_('instances_lv_size_gib (%s) must not be set for '
'image backed instance')
% ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB))
capabilities_dict = {constants.LVG_NOVA_PARAM_BACKING:
constants.LVG_NOVA_BACKING_IMAGE,
constants.LVG_NOVA_PARAM_DISK_OPS:
int(concurrent_disk_operations)}
elif instance_backing == constants.LVG_NOVA_BACKING_REMOTE:
instances_lv_size_mib = ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ)
if instances_lv_size_mib:
if ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB):
return ("Error",
_('error: Local Storage profile %s is invalid')
% profile_name,
_('instances_lv_size_mib (%s) must not be set for '
'remote backed instance') % instances_lv_size_mib)
_('instances_lv_size_gib (%s) must not be set for '
'remote backed instance')
% ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB))
capabilities_dict = {constants.LVG_NOVA_PARAM_BACKING:
constants.LVG_NOVA_BACKING_REMOTE,
@ -1860,7 +1867,7 @@ def _create_localstorage_profile(profile_name, profile_node):
for disk in disks:
dev_path = disk.get('path')
dev_size = int(disk.get('size'))
dev_size = int(disk.get('size')) * 1024
ddict = {'device_path': dev_path,
'size_mib': dev_size,

View File

@ -21,6 +21,7 @@
import jsonpatch
import copy
import math
from oslo_serialization import jsonutils
@ -386,20 +387,20 @@ def _discover_and_validate_cinder_hiera_data(caps_dict):
if len(valid_ctrls) == 2:
if pv_sizes[0]['size'] != pv_sizes[1]['size']:
msg = (_('Allocated storage for %s PVs must be equal and greater than '
'%s MiB on both controllers. Allocation for %s is %s MiB '
'while for %s is %s MiB.') %
'%s GiB on both controllers. Allocation for %s is %s GiB '
'while for %s is %s GiB.') %
(constants.LVG_CINDER_VOLUMES,
constants.CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB * 1024,
pv_sizes[0]['host'], pv_sizes[0]['size'],
pv_sizes[1]['host'], pv_sizes[1]['size']))
constants.CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB,
pv_sizes[0]['host'], math.floor(float(pv_sizes[0]['size']) / 1024 * 1000) / 1000.0,
pv_sizes[1]['host'], math.floor(float(pv_sizes[1]['size']) / 1024 * 1000) / 1000.0))
raise wsme.exc.ClientSideError(msg)
if pv_sizes[0]['size'] < (constants.CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB * 1024):
msg = (_('Minimum allocated storage for %s PVs is: %s MiB. '
'Current allocation is: %s MiB.') %
msg = (_('Minimum allocated storage for %s PVs is: %s GiB. '
'Current allocation is: %s GiB.') %
(constants.LVG_CINDER_VOLUMES,
constants.CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB * 1024,
pv_sizes[0]['size']))
constants.CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB,
math.floor(float(pv_sizes[0]['size']) / 1024 * 1000) / 1000.0))
raise wsme.exc.ClientSideError(msg)
# Log all the LVM parameters
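
A sketch of the check above with assumed values (5 GiB stands in for the
real CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB constant). PV sizes arrive in
MiB, the minimum is expressed in GiB, and the message floors the MiB
value back to GiB for display:

    import math

    CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB = 5    # assumed constant value
    pv_size_mib = 4096                        # assumed allocation: 4 GiB
    if pv_size_mib < CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB * 1024:
        print('Minimum is %s GiB, current allocation is %s GiB.'
              % (CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB,
                 math.floor(float(pv_size_mib) / 1024 * 1000) / 1000.0))
    # -> Minimum is 5 GiB, current allocation is 4.0 GiB.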

View File

@ -474,6 +474,7 @@ PV_NAME_UNKNOWN = 'unknown'
# Storage: Volume Group Parameter Types
LVG_NOVA_PARAM_BACKING = 'instance_backing'
LVG_NOVA_PARAM_INST_LV_SZ = 'instances_lv_size_mib'
LVG_NOVA_PARAM_INST_LV_SZ_GIB = 'instances_lv_size_gib'
LVG_NOVA_PARAM_DISK_OPS = 'concurrent_disk_operations'
LVG_CINDER_PARAM_LVM_TYPE = 'lvm_type'