Use oslo-incubator module units

Cinder has its own version of the units module, which looks fine. However,
for consistency with other projects such as Nova and Glance, it would be
better to use the oslo-incubator version.

Change-Id: I07e93e9d8a985df0f96c3e80de9c3f23bf6a0c1e
This commit is contained in:
Eric Guo 2013-12-30 19:36:49 +08:00 committed by ChangBo Guo(gcb)
parent 581d8ada14
commit 7f13e7e0fd
50 changed files with 257 additions and 238 deletions

View File

@ -55,7 +55,7 @@ from cinder.backup.driver import BackupDriver
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
from cinder import units
from cinder.openstack.common import units
from cinder import utils
import cinder.volume.drivers.rbd as rbd_driver
@ -75,7 +75,7 @@ service_opts = [
help='The Ceph user to connect with. Default here is to use '
'the same user as for Cinder volumes. If not using cephx '
'this should be set to None.'),
cfg.IntOpt('backup_ceph_chunk_size', default=(units.MiB * 128),
cfg.IntOpt('backup_ceph_chunk_size', default=(units.Mi * 128),
help='The chunk size, in bytes, that a backup is broken into '
'before transfer to the Ceph object store.'),
cfg.StrOpt('backup_ceph_pool', default='backups',
@ -814,7 +814,7 @@ class CephBackupDriver(BackupDriver):
errmsg = _("Need non-zero volume size")
raise exception.InvalidParameterValue(errmsg)
return int(volume['size']) * units.GiB
return int(volume['size']) * units.Gi
def _backup_metadata(self, backup):
"""Backup volume metadata.
@ -1097,7 +1097,7 @@ class CephBackupDriver(BackupDriver):
volume_name = volume['name']
backup_id = backup['id']
backup_volume_id = backup['volume_id']
length = int(volume['size']) * units.GiB
length = int(volume['size']) * units.Gi
base_name = self._get_backup_base_name(backup['volume_id'],
diff_format=True)

View File

@ -43,7 +43,7 @@ from cinder.backup.driver import BackupDriver
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import units
from cinder.openstack.common import units
from swiftclient import client as swift
@ -231,7 +231,7 @@ class SwiftBackupDriver(BackupDriver):
backup['service_metadata'] = object_prefix
self.db.backup_update(self.context, backup_id, {'service_metadata':
object_prefix})
volume_size_bytes = volume['size'] * units.GiB
volume_size_bytes = volume['size'] * units.Gi
availability_zone = self.az
LOG.debug('starting backup of volume: %(volume_id)s to swift,'
' volume size: %(volume_size_bytes)d, swift object names'

View File

@ -35,7 +35,7 @@ from cinder.openstack.common import fileutils
from cinder.openstack.common import imageutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import utils as volume_utils
@ -182,7 +182,7 @@ def fetch_to_volume_format(context, image_service,
return
data = qemu_img_info(tmp)
virt_size = data.virtual_size / units.GiB
virt_size = data.virtual_size / units.Gi
# NOTE(xqueralt): If the image virtual size doesn't fit in the
# requested volume there is no point on resizing it because it will

View File

@ -1,4 +1,5 @@
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -11,11 +12,27 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A module where we define some basic units for use across Cinder.
Unit constants
"""
KiB = 1024
MiB = KiB * 1024
GiB = MiB * 1024
TiB = GiB * 1024
#Binary unit constants.
Ki = 1024
Mi = 1024 ** 2
Gi = 1024 ** 3
Ti = 1024 ** 4
Pi = 1024 ** 5
Ei = 1024 ** 6
Zi = 1024 ** 7
Yi = 1024 ** 8
#Decimal unit constants.
k = 1000
M = 1000 ** 2
G = 1000 ** 3
T = 1000 ** 4
P = 1000 ** 5
E = 1000 ** 6
Z = 1000 ** 7
Y = 1000 ** 8

View File

@ -24,8 +24,8 @@ from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import coraid
@ -37,7 +37,7 @@ LOG = logging.getLogger(__name__)
def to_coraid_kb(gb):
return math.ceil(float(gb) * units.GiB / 1000)
return math.ceil(float(gb) * units.Gi / 1000)
def coraid_volume_size(gb):

View File

@ -33,8 +33,8 @@ from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import imageutils
from cinder.openstack.common import processutils as putils
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume import driver as base_driver
@ -473,9 +473,9 @@ class GlusterFsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_get_available_capacity')
drv._get_available_capacity(self.TEST_EXPORT1).\
AndReturn((2 * units.GiB, 5 * units.GiB))
AndReturn((2 * units.Gi, 5 * units.Gi))
drv._get_available_capacity(self.TEST_EXPORT2).\
AndReturn((3 * units.GiB, 10 * units.GiB))
AndReturn((3 * units.Gi, 10 * units.Gi))
mox.ReplayAll()
@ -494,9 +494,9 @@ class GlusterFsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_get_available_capacity')
drv._get_available_capacity(self.TEST_EXPORT1).\
AndReturn((0, 5 * units.GiB))
AndReturn((0, 5 * units.Gi))
drv._get_available_capacity(self.TEST_EXPORT2).\
AndReturn((0, 10 * units.GiB))
AndReturn((0, 10 * units.Gi))
mox.ReplayAll()
@ -577,7 +577,7 @@ class GlusterFsDriverTestCase(test.TestCase):
drv._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata', path,
str(volume['size'] * units.GiB),
str(volume['size'] * units.Gi),
run_as_root=True)
drv._execute('chmod', 'ugo+rw', path, run_as_root=True)

View File

@ -23,8 +23,8 @@ from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import gpfs
@ -750,7 +750,7 @@ class GPFSDriverTestCase(test.TestCase):
mock_rw_permission,
mock_gpfs_redirect,
mock_resize_volume_file):
mock_resize_volume_file.return_value = 5 * units.GiB
mock_resize_volume_file.return_value = 5 * units.Gi
volume = {}
volume['size'] = 1000
self.assertEqual(self.driver.create_volume_from_snapshot(volume, ''),
@ -765,7 +765,7 @@ class GPFSDriverTestCase(test.TestCase):
mock_create_gpfs_clone,
mock_rw_permission,
mock_resize_volume_file):
mock_resize_volume_file.return_value = 5 * units.GiB
mock_resize_volume_file.return_value = 5 * units.Gi
volume = {}
volume['size'] = 1000
self.assertEqual(self.driver.create_cloned_volume(volume, ''),
@ -955,7 +955,7 @@ class GPFSDriverTestCase(test.TestCase):
self.assertEqual(None, self.driver.terminate_connection('', ''))
def test_get_volume_stats(self):
fake_avail = 80 * units.GiB
fake_avail = 80 * units.Gi
fake_size = 2 * fake_avail
with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_get_available_capacity',
@ -1384,14 +1384,14 @@ class GPFSDriverTestCase(test.TestCase):
data = FakeQemuImgInfo()
data.file_format = 'qcow2'
data.backing_file = None
data.virtual_size = 1 * units.GiB
data.virtual_size = 1 * units.Gi
return data
def _fake_qemu_raw_image_info(self, path):
data = FakeQemuImgInfo()
data.file_format = 'raw'
data.backing_file = None
data.virtual_size = 1 * units.GiB
data.virtual_size = 1 * units.Gi
return data
def _fake_retype_arguments(self):

View File

@ -20,8 +20,8 @@ import mock
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder.tests import fake_hp_3par_client as hp3parclient
from cinder.volume.drivers.san.hp import hp_3par_fc as hpfcdriver
@ -772,7 +772,7 @@ class HP3PARBaseDriver(object):
old_size = self.volume['size']
new_size = old_size + grow_size
self.driver.extend_volume(self.volume, str(new_size))
growth_size_mib = grow_size * units.KiB
growth_size_mib = grow_size * units.Ki
expected = [
mock.call.growVolume(self.VOLUME_3PAR_NAME, growth_size_mib)]

View File

@ -19,8 +19,8 @@ import mock
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder.tests import fake_hp_lefthand_client as hplefthandclient
from cinder.volume.drivers.san.hp import hp_lefthand_iscsi
@ -672,8 +672,8 @@ class TestHPLeftHandRESTISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
_mock_client.return_value.getClusterByName.return_value = {
'id': 1, 'virtualIPAddresses': [{'ipV4Address': '10.0.1.6'}]}
_mock_client.return_value.getCluster.return_value = {
'spaceTotal': units.GiB * 500,
'spaceAvailable': units.GiB * 250}
'spaceTotal': units.Gi * 500,
'spaceAvailable': units.Gi * 250}
self.driver = hp_lefthand_iscsi.HPLeftHandISCSIDriver(
configuration=config)
self.driver.do_setup(None)
@ -699,7 +699,7 @@ class TestHPLeftHandRESTISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
mock.call.createVolume(
'fakevolume',
1,
units.GiB,
units.Gi,
{'isThinProvisioned': True, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
@ -737,7 +737,7 @@ class TestHPLeftHandRESTISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
mock.call.createVolume(
'fakevolume',
1,
units.GiB,
units.Gi,
{'isThinProvisioned': False, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
@ -785,7 +785,7 @@ class TestHPLeftHandRESTISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.modifyVolume(1, {'size': 2 * units.GiB})]
mock.call.modifyVolume(1, {'size': 2 * units.Gi})]
# validate call chain
mock_client.assert_has_calls(expected)
@ -1372,7 +1372,7 @@ class TestHPLeftHandRESTISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
mock.call.createVolume(
'fakevolume',
1,
units.GiB,
units.Gi,
{'isThinProvisioned': True, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
@ -1403,7 +1403,7 @@ class TestHPLeftHandRESTISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
mock.call.createVolume(
'fakevolume',
1,
units.GiB,
units.Gi,
{'isThinProvisioned': True,
'clusterName': 'CloudCluster1',
'isAdaptiveOptimizationEnabled': False})]

View File

@ -23,8 +23,8 @@ from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import processutils
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder import utils
@ -37,7 +37,7 @@ class FakeImageService:
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id):
return {'size': 2 * units.GiB,
return {'size': 2 * units.Gi,
'disk_format': 'qcow2',
'container_format': 'bare'}

View File

@ -24,8 +24,8 @@ import mox as mox_lib
from cinder import context
from cinder import db
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder.volume import configuration as conf
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import iscsi
@ -660,9 +660,9 @@ class TestNexentaNfsDriver(test.TestCase):
self.mox.ReplayAll()
total, free, allocated = self.drv._get_capacity_info(self.TEST_EXPORT1)
self.assertEqual(total, 3 * units.GiB)
self.assertEqual(free, units.GiB)
self.assertEqual(allocated, 2 * units.GiB)
self.assertEqual(total, 3 * units.Gi)
self.assertEqual(free, units.Gi)
self.assertEqual(allocated, 2 * units.Gi)
def test_get_share_datasets(self):
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
@ -748,8 +748,8 @@ class TestNexentaUtils(test.TestCase):
('1023b', 1023),
('0B', 0),
# Test other units
('1M', units.MiB),
('1.0M', units.MiB),
('1M', units.Mi),
('1.0M', units.Mi),
)
for value, result in values_to_test:
@ -761,9 +761,9 @@ class TestNexentaUtils(test.TestCase):
def test_str2gib_size(self):
self.assertEqual(utils.str2gib_size('1024M'), 1)
self.assertEqual(utils.str2gib_size('300M'),
300 * units.MiB // units.GiB)
300 * units.Mi // units.Gi)
self.assertEqual(utils.str2gib_size('1.2T'),
1.2 * units.TiB // units.GiB)
1.2 * units.Ti // units.Gi)
self.assertRaises(ValueError, utils.str2gib_size, 'A')
def test_parse_nms_url(self):

View File

@ -28,8 +28,8 @@ from oslo.config import cfg
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder.volume import configuration as conf
from cinder.volume.drivers import nfs
@ -87,7 +87,7 @@ class RemoteFsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_execute')
drv._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata', '/path',
'%s' % str(file_size * units.GiB), run_as_root=True)
'%s' % str(file_size * units.Gi), run_as_root=True)
mox.ReplayAll()
@ -183,7 +183,7 @@ class NfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(image_utils, 'qemu_img_info')
data = mox_lib.MockAnything()
data.virtual_size = 1 * units.GiB
data.virtual_size = 1 * units.Gi
image_utils.qemu_img_info(TEST_IMG_SOURCE).AndReturn(data)
mox.ReplayAll()
@ -412,17 +412,17 @@ class NfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((5 * units.GiB, 2 * units.GiB,
2 * units.GiB))
AndReturn((5 * units.Gi, 2 * units.Gi,
2 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((5 * units.GiB, 2 * units.GiB,
2 * units.GiB))
AndReturn((5 * units.Gi, 2 * units.Gi,
2 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((10 * units.GiB, 3 * units.GiB,
1 * units.GiB))
AndReturn((10 * units.Gi, 3 * units.Gi,
1 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((10 * units.GiB, 3 * units.GiB,
1 * units.GiB))
AndReturn((10 * units.Gi, 3 * units.Gi,
1 * units.Gi))
mox.ReplayAll()
@ -440,10 +440,10 @@ class NfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((5 * units.GiB, 0, 5 * units.GiB))
AndReturn((5 * units.Gi, 0, 5 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((10 * units.GiB, 0,
10 * units.GiB))
AndReturn((10 * units.Gi, 0,
10 * units.Gi))
mox.ReplayAll()
@ -612,11 +612,11 @@ class NfsDriverTestCase(test.TestCase):
drv._ensure_shares_mounted()
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((10 * units.GiB, 2 * units.GiB,
2 * units.GiB))
AndReturn((10 * units.Gi, 2 * units.Gi,
2 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((20 * units.GiB, 3 * units.GiB,
3 * units.GiB))
AndReturn((20 * units.Gi, 3 * units.Gi,
3 * units.Gi))
mox.ReplayAll()
@ -637,9 +637,9 @@ class NfsDriverTestCase(test.TestCase):
requested_volume_size)
def test_is_share_eligible(self):
total_size = 100.0 * units.GiB
total_available = 90.0 * units.GiB
total_allocated = 10.0 * units.GiB
total_size = 100.0 * units.Gi
total_available = 90.0 * units.Gi
total_allocated = 10.0 * units.Gi
requested_volume_size = 1 # GiB
self.assertTrue(self._check_is_share_eligible(total_size,
@ -648,9 +648,9 @@ class NfsDriverTestCase(test.TestCase):
requested_volume_size))
def test_is_share_eligible_above_used_ratio(self):
total_size = 100.0 * units.GiB
total_available = 4.0 * units.GiB
total_allocated = 96.0 * units.GiB
total_size = 100.0 * units.Gi
total_available = 4.0 * units.Gi
total_allocated = 96.0 * units.Gi
requested_volume_size = 1 # GiB
# Check used > used_ratio statement entered
@ -660,9 +660,9 @@ class NfsDriverTestCase(test.TestCase):
requested_volume_size))
def test_is_share_eligible_above_oversub_ratio(self):
total_size = 100.0 * units.GiB
total_available = 10.0 * units.GiB
total_allocated = 90.0 * units.GiB
total_size = 100.0 * units.Gi
total_available = 10.0 * units.Gi
total_allocated = 90.0 * units.Gi
requested_volume_size = 10 # GiB
# Check apparent_available <= requested_volume_size statement entered
@ -672,9 +672,9 @@ class NfsDriverTestCase(test.TestCase):
requested_volume_size))
def test_is_share_eligible_reserved_space_above_oversub_ratio(self):
total_size = 100.0 * units.GiB
total_available = 10.0 * units.GiB
total_allocated = 100.0 * units.GiB
total_size = 100.0 * units.Gi
total_available = 10.0 * units.Gi
total_allocated = 100.0 * units.Gi
requested_volume_size = 1 # GiB
# Check total_allocated / total_size >= oversub_ratio
@ -738,7 +738,7 @@ class NfsDriverTestCase(test.TestCase):
path = 'fake/path'
size = 2
data = mock.MagicMock()
data.virtual_size = size * units.GiB
data.virtual_size = size * units.Gi
with mock.patch.object(image_utils, 'qemu_img_info',
return_value=data):
@ -750,7 +750,7 @@ class NfsDriverTestCase(test.TestCase):
path = 'fake/path'
size = 2
data = mock.MagicMock()
data.virtual_size = (size + 1) * units.GiB
data.virtual_size = (size + 1) * units.Gi
with mock.patch.object(image_utils, 'qemu_img_info',
return_value=data):

View File

@ -26,10 +26,10 @@ from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
from cinder import test
from cinder.tests.image import fake as fake_image
from cinder.tests.test_volume import DriverTestCase
from cinder import units
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
from cinder.volume.flows.manager import create_volume
@ -173,10 +173,10 @@ class RBDTestCase(test.TestCase):
self.driver.create_volume(self.volume)
chunk_size = self.cfg.rbd_store_chunk_size * units.MiB
chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
args = [client.ioctx, str(self.volume_name),
self.volume_size * units.GiB, order]
self.volume_size * units.Gi, order]
kwargs = {'old_format': False,
'features': self.mock_rbd.RBD_FEATURE_LAYERING}
self.mock_rbd.RBD.create.assert_called_once_with(*args, **kwargs)
@ -196,10 +196,10 @@ class RBDTestCase(test.TestCase):
self.driver.create_volume(self.volume)
chunk_size = self.cfg.rbd_store_chunk_size * units.MiB
chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
args = [client.ioctx, str(self.volume_name),
self.volume_size * units.GiB, order]
self.volume_size * units.Gi, order]
kwargs = {'old_format': True,
'features': 0}
self.mock_rbd.RBD.create.assert_called_once_with(*args, **kwargs)
@ -667,7 +667,7 @@ class RBDTestCase(test.TestCase):
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
self.mox.StubOutWithMock(self.driver, '_resize')
size = int(fake_size) * units.GiB
size = int(fake_size) * units.Gi
self.driver._resize(fake_vol, size=size)
self.mox.ReplayAll()

View File

@ -26,8 +26,8 @@ import mox as mox_lib
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder import utils
from cinder.volume.drivers import scality
@ -181,7 +181,7 @@ class ScalityDriverTestCase(test.TestCase):
self.TEST_VOLNAME))
self.assertTrue(os.path.isfile(self.TEST_VOLPATH))
self.assertEqual(os.stat(self.TEST_VOLPATH).st_size,
100 * units.MiB)
100 * units.Mi)
def test_delete_volume(self):
"""Expected behaviour for delete_volume."""

View File

@ -21,8 +21,8 @@ import tempfile
from cinder.image import image_utils
from cinder.openstack.common import processutils
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder.volume.drivers.sheepdog import SheepdogDriver
@ -69,8 +69,8 @@ class SheepdogTestCase(test.TestCase):
vendor_name='Open Source',
dirver_version=self.driver.VERSION,
storage_protocol='sheepdog',
total_capacity_gb=float(107287605248) / units.GiB,
free_capacity_gb=float(107287605248 - 3623897354) / units.GiB,
total_capacity_gb=float(107287605248) / units.Gi,
free_capacity_gb=float(107287605248 - 3623897354) / units.Gi,
reserved_percentage=0,
QoS_support=False)
actual = self.driver.get_volume_stats(True)
@ -134,7 +134,7 @@ class SheepdogTestCase(test.TestCase):
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
self.mox.StubOutWithMock(self.driver, '_resize')
size = int(fake_size) * units.GiB
size = int(fake_size) * units.Gi
self.driver._resize(fake_vol, size=size)
self.mox.ReplayAll()

View File

@ -20,8 +20,8 @@ from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder.volume import configuration as conf
from cinder.volume.drivers.solidfire import SolidFireDriver
from cinder.volume import qos_specs
@ -121,7 +121,7 @@ class SolidFireVolumeTestCase(test.TestCase):
'name': test_name,
'accountID': 25,
'sliceCount': 1,
'totalSize': 1 * units.GiB,
'totalSize': 1 * units.Gi,
'enable512e': True,
'access': "readWrite",
'status': "active",

View File

@ -29,9 +29,9 @@ from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import units
from cinder import test
from cinder.tests import utils as testutils
from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import storwize_svc
@ -627,7 +627,7 @@ port_speed!N/A
return self._errors['CMMVC5753E']
curr_size = int(self._volumes_list[vol_name]['capacity'])
addition = size * units.GiB
addition = size * units.Gi
self._volumes_list[vol_name]['capacity'] = str(curr_size + addition)
return ('', '')
@ -1765,7 +1765,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
# Make sure volume attributes are as they should be
attributes = self.driver._helpers.get_vdisk_attributes(volume['name'])
attr_size = float(attributes['capacity']) / units.GiB # bytes to GB
attr_size = float(attributes['capacity']) / units.Gi # bytes to GB
self.assertEqual(attr_size, float(volume['size']))
pool = self.driver.configuration.local_conf.storwize_svc_volpool_name
self.assertEqual(attributes['mdisk_grp_name'], pool)
@ -2230,7 +2230,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
volume = self._create_volume()
self.driver.extend_volume(volume, '13')
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
vol_size = int(attrs['capacity']) / units.GiB
vol_size = int(attrs['capacity']) / units.Gi
self.assertAlmostEqual(vol_size, 13)
snap = self._generate_vol_info(volume['name'], volume['id'])

View File

@ -25,8 +25,8 @@ import mox
from cinder import exception
from cinder.image import glance
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder.volume import configuration
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import error_util
@ -494,7 +494,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
volumeops.get_dc.assert_called_once_with(rp)
volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
driver._get_storage_profile.assert_called_once_with(volume)
size = volume['size'] * units.GiB
size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size, dss)
def test_get_disk_type(self):
@ -533,7 +533,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
backing = FakeMor('VirtualMachine', 'my_back')
m.StubOutWithMock(self._volumeops, 'create_backing')
self._volumeops.create_backing(volume['name'],
volume['size'] * units.MiB,
volume['size'] * units.Mi,
mox.IgnoreArg(), folder,
resource_pool, host,
mox.IgnoreArg(),
@ -957,7 +957,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
fake_context = mock.sentinel.context
fake_image_id = 'image-id'
fake_image_meta = {'disk_format': 'vmdk',
'size': 2 * units.GiB,
'size': 2 * units.Gi,
'properties': {'vmware_disktype': 'preallocated'}}
image_service = mock.Mock(glance.GlanceImageService)
fake_size = 3
@ -1051,8 +1051,8 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
fake_context = mock.Mock()
fake_backing = mock.sentinel.backing
fake_image_id = 'image-id'
size = 5 * units.GiB
size_gb = float(size) / units.GiB
size = 5 * units.Gi
size_gb = float(size) / units.Gi
fake_volume_size = 1 + size_gb
fake_image_meta = {'disk_format': 'vmdk', 'size': size,
'properties': {'vmware_disktype':
@ -1189,7 +1189,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
volume = FakeObject()
volume['name'] = vol_name
size_gb = 5
size = size_gb * units.GiB
size = size_gb * units.Gi
volume['size'] = size_gb
volume['project_id'] = project_id
volume['instance_uuid'] = None
@ -1851,7 +1851,7 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
self.VOLUME_FOLDER)
driver._get_storage_profile.assert_called_once_with(volume)
driver._filter_ds_by_profile.assert_called_once_with(dss, profile)
size = volume['size'] * units.GiB
size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size,
filtered_dss)

View File

@ -19,8 +19,8 @@ Test suite for VMware VMDK driver volumeops module.
import mock
from cinder.openstack.common import units
from cinder import test
from cinder import units
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
from cinder.volume.drivers.vmware import volumeops
@ -801,7 +801,7 @@ class VolumeOpsTestCase(test.TestCase):
invoke_api.return_value = task
disk_mgr = self.session.vim.service_content.virtualDiskManager
fake_size = 5
fake_size_in_kb = fake_size * units.MiB
fake_size_in_kb = fake_size * units.Mi
fake_name = 'fake_volume_0000000001'
fake_dc = mock.sentinel.datacenter
self.vops.extend_virtual_disk(fake_size,

View File

@ -44,6 +44,7 @@ from cinder.openstack.common import fileutils
from cinder.openstack.common import importutils
from cinder.openstack.common import jsonutils
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
import cinder.policy
from cinder import quota
from cinder import test
@ -53,7 +54,6 @@ from cinder.tests import fake_notifier
from cinder.tests.image import fake as fake_image
from cinder.tests.keymgr import fake as fake_keymgr
from cinder.tests import utils as tests_utils
from cinder import units
from cinder import utils
import cinder.volume
from cinder.volume import configuration as conf
@ -83,7 +83,7 @@ class FakeImageService:
pass
def show(self, context, image_id):
return {'size': 2 * units.GiB,
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
@ -2066,7 +2066,7 @@ class VolumeTestCase(BaseVolumeTestCase):
"""Verify that an image which is too big will fail correctly."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.GiB + 1,
return {'size': 2 * units.Gi + 1,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
@ -2083,7 +2083,7 @@ class VolumeTestCase(BaseVolumeTestCase):
"""Verify volumes smaller than image minDisk will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.GiB,
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
@ -2101,7 +2101,7 @@ class VolumeTestCase(BaseVolumeTestCase):
"""Verify create volume from image will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.GiB,
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,

View File

@ -33,7 +33,7 @@ from cinder import exception
from cinder.openstack.common import jsonutils
from cinder.openstack.common import lockutils
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder.volume import driver
from cinder.volume import volume_types
@ -140,7 +140,7 @@ class CoraidRESTClient(object):
def to_coraid_kb(gb):
return math.ceil(float(gb) * units.GiB / 1000)
return math.ceil(float(gb) * units.Gi / 1000)
def coraid_volume_size(gb):

View File

@ -27,7 +27,7 @@ from xml.dom.minidom import parseString
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
@ -80,7 +80,7 @@ class EMCSMISCommon():
def create_volume(self, volume):
"""Creates a EMC(VMAX/VNX) volume."""
LOG.debug('Entering create_volume.')
volumesize = int(volume['size']) * units.GiB
volumesize = int(volume['size']) * units.Gi
volumename = volume['name']
LOG.info(_('Create Volume: %(volume)s Size: %(size)lu')
@ -954,7 +954,7 @@ class EMCSMISCommon():
def extend_volume(self, volume, new_size):
"""Extends an existing volume."""
LOG.debug('Entering extend_volume.')
volumesize = int(new_size) * units.GiB
volumesize = int(new_size) * units.Gi
volumename = volume['name']
LOG.info(_('Extend Volume: %(volume)s New size: %(size)lu')

View File

@ -31,7 +31,7 @@ from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers import nfs
@ -1104,7 +1104,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
greatest_share = glusterfs_share
greatest_size = capacity
if volume_size_for * units.GiB > greatest_size:
if volume_size_for * units.Gi > greatest_size:
raise exception.GlusterfsNoSuitableShareFound(
volume_size=volume_size_for)
return greatest_share

View File

@ -19,7 +19,7 @@ Hitachi Unified Storage (HUS-HNAS) platform. Backend operations.
"""
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder import utils
import re
@ -116,13 +116,13 @@ class HnasBackend():
(inf[0], inf[1], inf[2], inf[3], inf[5], inf[7])
(availunit, usedunit) = (inf[4], inf[6])
if usedunit == 'GB':
usedmultiplier = units.KiB
usedmultiplier = units.Ki
else:
usedmultiplier = units.MiB
usedmultiplier = units.Mi
if availunit == 'GB':
availmultiplier = units.KiB
availmultiplier = units.Ki
else:
availmultiplier = units.MiB
availmultiplier = units.Mi
m = re.match("\((\d+)\%\)", perstr)
if m:
percent = m.group(1)

View File

@ -24,7 +24,7 @@ from xml.etree import ElementTree as ETree
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hds.hnas_backend import HnasBackend
@ -286,7 +286,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
if 'HDP' in line:
(hdp, size, _ign, used) = line.split()[1:5] # in MB
LOG.debug("stats: looking for: %s", hdp)
if int(hdp) >= units.KiB: # HNAS fsid
if int(hdp) >= units.Ki: # HNAS fsid
hdp = line.split()[11]
if hdp in self.config['hdp'].keys():
total_cap += int(size)
@ -295,9 +295,9 @@ class HDSISCSIDriver(driver.ISCSIDriver):
LOG.info("stats: total: %d used: %d" % (total_cap, total_used))
hnas_stat = {}
hnas_stat['total_capacity_gb'] = int(total_cap / units.KiB) # in GB
hnas_stat['total_capacity_gb'] = int(total_cap / units.Ki) # in GB
hnas_stat['free_capacity_gb'] = \
int((total_cap - total_used) / units.KiB)
int((total_cap - total_used) / units.Ki)
be_name = self.configuration.safe_get('volume_backend_name')
hnas_stat["volume_backend_name"] = be_name or 'HDSISCSIDriver'
hnas_stat["vendor_name"] = 'HDS'
@ -321,8 +321,8 @@ class HDSISCSIDriver(driver.ISCSIDriver):
for line in out.split('\n'):
if 'HDP' in line:
inf = line.split()
if int(inf[1]) >= units.KiB:
# HDP fsids start at units.KiB (1024)
if int(inf[1]) >= units.Ki:
# HDP fsids start at units.Ki (1024)
hdp_list.append(inf[11])
else:
# HDP pools are 2-digits max
@ -434,7 +434,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
self.config['username'],
self.config['password'],
hdp,
'%s' % (int(volume['size']) * units.KiB),
'%s' % (int(volume['size']) * units.Ki),
volume['name'])
LOG.info(_("create_volume: create_lu returns %s") % out)
@ -458,7 +458,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
raise exception.VolumeBackendAPIException(data=msg)
service = self._get_service(dst)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
size = int(src['size']) * units.KiB
size = int(src['size']) * units.Ki
source_vol = self._id_to_vol(src['id'])
(arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
out = self.bend.create_dup(self.config['hnas_cmd'],
@ -491,7 +491,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
self.config['username'],
self.config['password'],
hdp, lun,
'%s' % (new_size * units.KiB),
'%s' % (new_size * units.Ki),
volume['name'])
LOG.info(_("LUN %(lun)s extended to %(size)s GB.")
@ -615,7 +615,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
:param snapshot: dictionary snapshot reference
"""
size = int(snapshot['volume_size']) * units.KiB
size = int(snapshot['volume_size']) * units.Ki
(arid, slun) = _loc_info(snapshot['provider_location'])['id_lu']
service = self._get_service(volume)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
@ -640,7 +640,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
source_vol = self._id_to_vol(snapshot['volume_id'])
service = self._get_service(source_vol)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
size = int(snapshot['volume_size']) * units.KiB
size = int(snapshot['volume_size']) * units.Ki
(arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
out = self.bend.create_dup(self.config['hnas_cmd'],
self.config['mgmt_ip0'],

View File

@ -28,7 +28,7 @@ from cinder.image import image_utils
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder.openstack.common import units
from cinder.volume.drivers.hds.hnas_backend import HnasBackend
from cinder.volume.drivers import nfs
@ -226,7 +226,7 @@ class HDSNFSDriver(nfs.NfsDriver):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path)
virt_size = data.virtual_size / units.GiB
virt_size = data.virtual_size / units.Gi
if virt_size == size:
return True

View File

@ -28,7 +28,7 @@ from cinder import context
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume import volume_types
@ -220,9 +220,9 @@ class HVSCommon():
calculates volume size with sectors, which is 512 bytes.
"""
volume_size = units.GiB / 512 # 1G
volume_size = units.Gi / 512 # 1G
if int(volume['size']) != 0:
volume_size = int(volume['size']) * units.GiB / 512
volume_size = int(volume['size']) * units.Gi / 512
return volume_size
@ -1290,7 +1290,7 @@ class HVSCommon():
lun_id = self._get_volume_by_name(name)
if lun_id:
url = self.url + "/lun/expand"
capacity = int(new_size) * units.GiB / 512
capacity = int(new_size) * units.Gi / 512
data = json.dumps({"TYPE": "11",
"ID": lun_id,
"CAPACITY": capacity})

View File

@ -28,7 +28,7 @@ from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
@ -432,7 +432,7 @@ class GPFSDriver(driver.VolumeDriver):
"""Preallocate file blocks by writing zeros."""
block_size_mb = 1
block_count = size * units.GiB / (block_size_mb * units.MiB)
block_count = size * units.Gi / (block_size_mb * units.Mi)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
@ -514,7 +514,7 @@ class GPFSDriver(driver.VolumeDriver):
self._set_rw_permission(volume_path)
self._gpfs_redirect(volume_path)
virt_size = self._resize_volume_file(volume, volume['size'])
return {'size': math.ceil(virt_size / units.GiB)}
return {'size': math.ceil(virt_size / units.Gi)}
def create_cloned_volume(self, volume, src_vref):
"""Create a GPFS volume from another volume."""
@ -524,7 +524,7 @@ class GPFSDriver(driver.VolumeDriver):
self._create_gpfs_clone(src, dest)
self._set_rw_permission(dest)
virt_size = self._resize_volume_file(volume, volume['size'])
return {'size': math.ceil(virt_size / units.GiB)}
return {'size': math.ceil(virt_size / units.Gi)}
def _delete_gpfs_file(self, fchild):
"""Delete a GPFS file and cleanup clone children."""
@ -688,8 +688,8 @@ class GPFSDriver(driver.VolumeDriver):
data["storage_protocol"] = 'file'
free, capacity = self._get_available_capacity(self.configuration.
gpfs_mount_point_base)
data['total_capacity_gb'] = math.ceil(capacity / units.GiB)
data['free_capacity_gb'] = math.ceil(free / units.GiB)
data['total_capacity_gb'] = math.ceil(capacity / units.Gi)
data['free_capacity_gb'] = math.ceil(free / units.Gi)
data['reserved_percentage'] = 0
data['QoS_support'] = False
data['storage_pool'] = self._storage_pool

View File

@ -36,7 +36,7 @@ from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers import nfs
from cinder.volume.drivers.nfs import nas_opts
@ -126,8 +126,8 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
global_capacity += capacity
global_free += free
data['total_capacity_gb'] = global_capacity / float(units.GiB)
data['free_capacity_gb'] = global_free / float(units.GiB)
data['total_capacity_gb'] = global_capacity / float(units.Gi)
data['free_capacity_gb'] = global_free / float(units.Gi)
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data

View File

@ -42,7 +42,7 @@ from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
from cinder.volume.drivers.san import san
@ -853,7 +853,7 @@ class StorwizeSVCDriver(san.SanDriver):
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
return int(math.ceil(float(vdisk['capacity']) / units.GiB))
return int(math.ceil(float(vdisk['capacity']) / units.Gi))
def get_volume_stats(self, refresh=False):
"""Get volume stats.
@ -895,9 +895,9 @@ class StorwizeSVCDriver(san.SanDriver):
raise exception.VolumeBackendAPIException(data=exception_message)
data['total_capacity_gb'] = (float(attributes['capacity']) /
units.GiB)
units.Gi)
data['free_capacity_gb'] = (float(attributes['free_capacity']) /
units.GiB)
units.Gi)
data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
data['compression_support'] = self._state['compression_enabled']
data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %

View File

@ -31,7 +31,7 @@ from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
@ -163,7 +163,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
# clear_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
vol_sz_in_meg = size_in_g * units.KiB
vol_sz_in_meg = size_in_g * units.Ki
volutils.clear_volume(
vol_sz_in_meg, dev_path,
@ -210,7 +210,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
snapshot['volume_size'] * units.KiB,
snapshot['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute)
@ -304,7 +304,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
volutils.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
src_vref['size'] * units.KiB,
src_vref['size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute)
finally:

View File

@ -25,7 +25,7 @@ from oslo.config import cfg
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.eseries import client
@ -306,7 +306,7 @@ class Driver(driver.ISCSIDriver):
def _get_sorted_avl_storage_pools(self, size_gb):
"""Returns storage pools sorted on available capacity."""
size = size_gb * units.GiB
size = size_gb * units.Gi
pools = self._client.list_storage_pools()
sorted_pools = sorted(pools, key=lambda x:
(int(x.get('totalRaidedSpace', 0))
@ -350,7 +350,7 @@ class Driver(driver.ISCSIDriver):
LOG.debug("Creating snap vol for group %s", group['label'])
image = self._get_cached_snap_grp_image(snapshot_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
capacity = int(image['pitCapacity']) / units.GiB
capacity = int(image['pitCapacity']) / units.Gi
storage_pools = self._get_sorted_avl_storage_pools(capacity)
s_id = storage_pools[0]['volumeGroupRef']
return self._client.create_snapshot_volume(image['pitRef'], label,
@ -423,7 +423,7 @@ class Driver(driver.ISCSIDriver):
snap_grp, snap_image = None, None
snapshot_name = utils.convert_uuid_to_es_fmt(snapshot['id'])
vol = self._get_volume(snapshot['volume_id'])
vol_size_gb = int(vol['totalSizeInBytes']) / units.GiB
vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi
pools = self._get_sorted_avl_storage_pools(vol_size_gb)
try:
snap_grp = self._client.create_snapshot_group(
@ -646,8 +646,8 @@ class Driver(driver.ISCSIDriver):
if pool['volumeGroupRef'] in self._objects['disk_pool_refs']:
tot_bytes = tot_bytes + int(pool.get('totalRaidedSpace', 0))
used_bytes = used_bytes + int(pool.get('usedSpace', 0))
self._stats['free_capacity_gb'] = (tot_bytes - used_bytes) / units.GiB
self._stats['total_capacity_gb'] = tot_bytes / units.GiB
self._stats['free_capacity_gb'] = (tot_bytes - used_bytes) / units.Gi
self._stats['total_capacity_gb'] = tot_bytes / units.Gi
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""

View File

@ -30,7 +30,7 @@ from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.api import NaApiError
@ -577,7 +577,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
name = volume['name']
path = self.lun_table[name].metadata['Path']
curr_size_bytes = str(self.lun_table[name].size)
new_size_bytes = str(int(new_size) * units.GiB)
new_size_bytes = str(int(new_size) * units.Gi)
# Reused by clone scenarios.
# Hence comparing the stored size.
if curr_size_bytes != new_size_bytes:
@ -1103,9 +1103,9 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
if self.ssc_vols['all']:
vol_max = max(self.ssc_vols['all'])
data['total_capacity_gb'] =\
int(vol_max.space['size_total_bytes']) / units.GiB
int(vol_max.space['size_total_bytes']) / units.Gi
data['free_capacity_gb'] =\
int(vol_max.space['size_avl_bytes']) / units.GiB
int(vol_max.space['size_avl_bytes']) / units.Gi
else:
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0
@ -1539,8 +1539,8 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
avl_size = vol.get_child_content('size-available')
if avl_size:
free_bytes = free_bytes + int(avl_size)
self.total_gb = total_bytes / units.GiB
self.free_gb = free_bytes / units.GiB
self.total_gb = total_bytes / units.Gi
self.free_gb = free_bytes / units.Gi
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""

View File

@ -30,7 +30,7 @@ from cinder.image import image_utils
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
@ -493,7 +493,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path)
virt_size = data.virtual_size / units.GiB
virt_size = data.virtual_size / units.Gi
if virt_size == size:
return True
else:
@ -969,9 +969,9 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
if self.ssc_vols['all']:
vol_max = max(self.ssc_vols['all'])
data['total_capacity_gb'] =\
int(vol_max.space['size_total_bytes']) / units.GiB
int(vol_max.space['size_total_bytes']) / units.Gi
data['free_capacity_gb'] =\
int(vol_max.space['size_avl_bytes']) / units.GiB
int(vol_max.space['size_avl_bytes']) / units.Gi
else:
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0

View File

@ -29,7 +29,7 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
@ -294,7 +294,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921
:param size: size of file
"""
block_size_mb = 1
block_count = size * units.GiB / (block_size_mb * units.MiB)
block_count = size * units.Gi / (block_size_mb * units.Mi)
LOG.info(_('Creating regular file: %s.'
'This may take some time.') % path)

View File

@ -25,7 +25,7 @@ import re
import six.moves.urllib.parse as urlparse
from cinder import units
from cinder.openstack.common import units
def str2size(s, scale=1024):
@ -59,7 +59,7 @@ def str2size(s, scale=1024):
def str2gib_size(s):
    """Convert size-string to size in gigabytes."""
size_in_bytes = str2size(s)
return size_in_bytes / units.GiB
return size_in_bytes / units.Gi
def get_rrmgr_cmd(src, dst, compression=None, tcp_buf_size=None,

View File

@ -24,7 +24,7 @@ from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
@ -229,7 +229,7 @@ class RemoteFsDriver(driver.VolumeDriver):
"""
block_size_mb = 1
block_count = size * units.GiB / (block_size_mb * units.MiB)
block_count = size * units.Gi / (block_size_mb * units.Mi)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
@ -241,7 +241,7 @@ class RemoteFsDriver(driver.VolumeDriver):
self._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata',
path, str(size_gb * units.GiB),
path, str(size_gb * units.Gi),
run_as_root=True)
def _set_rw_permissions_for_all(self, path):
@ -275,7 +275,7 @@ class RemoteFsDriver(driver.VolumeDriver):
image_utils.resize_image(self.local_path(volume), volume['size'])
data = image_utils.qemu_img_info(self.local_path(volume))
virt_size = data.virtual_size / units.GiB
virt_size = data.virtual_size / units.Gi
if virt_size != volume['size']:
raise exception.ImageUnacceptable(
image_id=image_id,
@ -360,8 +360,8 @@ class RemoteFsDriver(driver.VolumeDriver):
global_capacity += capacity
global_free += free
data['total_capacity_gb'] = global_capacity / float(units.GiB)
data['free_capacity_gb'] = global_free / float(units.GiB)
data['total_capacity_gb'] = global_capacity / float(units.Gi)
data['free_capacity_gb'] = global_free / float(units.Gi)
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data
@ -521,7 +521,7 @@ class NfsDriver(RemoteFsDriver):
used_ratio = self.configuration.nfs_used_ratio
oversub_ratio = self.configuration.nfs_oversub_ratio
requested_volume_size = volume_size_in_gib * units.GiB
requested_volume_size = volume_size_in_gib * units.Gi
total_size, total_available, total_allocated = \
self._get_capacity_info(nfs_share)
@ -590,5 +590,5 @@ class NfsDriver(RemoteFsDriver):
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path)
virt_size = data.virtual_size / units.GiB
virt_size = data.virtual_size / units.Gi
return virt_size == size

View File

@ -28,7 +28,7 @@ from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
from cinder import units
from cinder.openstack.common import units
from cinder.volume import driver
try:
@ -346,8 +346,8 @@ class RBDDriver(driver.VolumeDriver):
try:
with RADOSClient(self) as client:
new_stats = client.cluster.get_cluster_stats()
stats['total_capacity_gb'] = new_stats['kb'] / units.MiB
stats['free_capacity_gb'] = new_stats['kb_avail'] / units.MiB
stats['total_capacity_gb'] = new_stats['kb'] / units.Mi
stats['free_capacity_gb'] = new_stats['kb_avail'] / units.Mi
except self.rados.Error:
# just log and return unknown capacities
LOG.exception(_('error refreshing volume stats'))
@ -468,15 +468,15 @@ class RBDDriver(driver.VolumeDriver):
def create_volume(self, volume):
"""Creates a logical volume."""
if int(volume['size']) == 0:
size = 100 * units.MiB
size = 100 * units.Mi
else:
size = int(volume['size']) * units.GiB
size = int(volume['size']) * units.Gi
LOG.debug("creating volume '%s'" % (volume['name']))
old_format = True
features = 0
chunk_size = CONF.rbd_store_chunk_size * units.MiB
chunk_size = CONF.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
if self._supports_layering():
old_format = False
@ -512,7 +512,7 @@ class RBDDriver(driver.VolumeDriver):
def _resize(self, volume, **kwargs):
size = kwargs.get('size', None)
if not size:
size = int(volume['size']) * units.GiB
size = int(volume['size']) * units.Gi
with RBDVolumeProxy(self, volume['name']) as vol:
vol.resize(size)
@ -786,7 +786,7 @@ class RBDDriver(driver.VolumeDriver):
self.delete_volume(volume)
chunk_size = CONF.rbd_store_chunk_size * units.MiB
chunk_size = CONF.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
# keep using the command line import instead of librbd since it
# detects zeroes to preserve sparseness in the image
@ -848,7 +848,7 @@ class RBDDriver(driver.VolumeDriver):
old_size = volume['size']
try:
size = int(new_size) * units.GiB
size = int(new_size) * units.Gi
self._resize(volume, size=size)
except Exception:
msg = _('Failed to Extend Volume '

View File

@ -54,7 +54,7 @@ from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import units
from cinder.openstack.common import units
from cinder.volume import qos_specs
from cinder.volume import volume_types
@ -264,7 +264,7 @@ class HP3PARCommon(object):
" by %(diff)s GB." %
{'vol': volume_name, 'old': old_size, 'new': new_size,
'diff': growth_size})
growth_size_mib = growth_size * units.KiB
growth_size_mib = growth_size * units.Ki
self._extend_volume(volume, volume_name, growth_size_mib)
def _extend_volume(self, volume, volume_name, growth_size_mib,
@ -637,13 +637,13 @@ class HP3PARCommon(object):
if min_io is None:
qosRule['ioMinGoal'] = int(max_io)
if min_bw:
qosRule['bwMinGoalKB'] = int(min_bw) * units.KiB
qosRule['bwMinGoalKB'] = int(min_bw) * units.Ki
if max_bw is None:
qosRule['bwMaxLimitKB'] = int(min_bw) * units.KiB
qosRule['bwMaxLimitKB'] = int(min_bw) * units.Ki
if max_bw:
qosRule['bwMaxLimitKB'] = int(max_bw) * units.KiB
qosRule['bwMaxLimitKB'] = int(max_bw) * units.Ki
if min_bw is None:
qosRule['bwMinGoalKB'] = int(max_bw) * units.KiB
qosRule['bwMinGoalKB'] = int(max_bw) * units.Ki
if latency:
qosRule['latencyGoal'] = int(latency)
if priority:
@ -1002,7 +1002,7 @@ class HP3PARCommon(object):
LOG.debug('Converting to base volume type: %s.' %
volume['id'])
self._convert_to_base_volume(volume)
growth_size_mib = growth_size * units.GiB / units.MiB
growth_size_mib = growth_size * units.Gi / units.Mi
LOG.debug('Growing volume: %(id)s by %(size)s GiB.' %
{'id': volume['id'], 'size': growth_size})
self.client.growVolume(volume_name, growth_size_mib)

View File

@ -25,7 +25,7 @@ from lxml import etree
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder.openstack.common import units
from cinder.volume.drivers.san.san import SanISCSIDriver
@ -438,7 +438,7 @@ class HPLeftHandCLIQProxy(SanISCSIDriver):
cluster_node = result_xml.find("response/cluster")
total_capacity = cluster_node.attrib.get("spaceTotal")
free_capacity = cluster_node.attrib.get("unprovisionedSpace")
GB = units.GiB
GB = units.Gi
data['total_capacity_gb'] = int(total_capacity) / GB
data['free_capacity_gb'] = int(free_capacity) / GB

View File

@ -18,7 +18,7 @@
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.driver import ISCSIDriver
from cinder.volume import volume_types
@ -168,7 +168,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
volume_info = self.client.createVolume(
volume['name'], self.cluster_id,
volume['size'] * units.GiB,
volume['size'] * units.Gi,
optional)
return self._update_provider(volume_info)
@ -191,7 +191,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
volume_info = self.client.getVolumeByName(volume['name'])
# convert GB to bytes
options = {'size': int(new_size) * units.GiB}
options = {'size': int(new_size) * units.Gi}
self.client.modifyVolume(volume_info['id'], options)
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
@ -249,8 +249,8 @@ class HPLeftHandRESTProxy(ISCSIDriver):
free_capacity = cluster_info['spaceAvailable']
# convert to GB
data['total_capacity_gb'] = int(total_capacity) / units.GiB
data['free_capacity_gb'] = int(free_capacity) / units.GiB
data['total_capacity_gb'] = int(total_capacity) / units.Gi
data['free_capacity_gb'] = int(free_capacity) / units.Gi
self.device_stats = data

View File

@ -27,7 +27,7 @@ import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder.volume import driver
@ -108,8 +108,8 @@ class ScalityDriver(driver.VolumeDriver):
def _size_bytes(self, size_in_g):
if int(size_in_g) == 0:
return 100 * units.MiB
return int(size_in_g) * units.GiB
return 100 * units.Mi
return int(size_in_g) * units.Gi
def _create_file(self, path, size):
with open(path, "ab") as f:

View File

@ -28,7 +28,7 @@ from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder.openstack.common import units
from cinder.volume import driver
@ -90,7 +90,7 @@ class SheepdogDriver(driver.VolumeDriver):
def _resize(self, volume, size=None):
if not size:
size = int(volume['size']) * units.GiB
size = int(volume['size']) * units.Gi
self._try_execute('collie', 'vdi', 'resize',
volume['name'], size)
@ -172,8 +172,8 @@ class SheepdogDriver(driver.VolumeDriver):
m = self.stats_pattern.match(stdout)
total = float(m.group(1))
used = float(m.group(2))
stats['total_capacity_gb'] = total / units.GiB
stats['free_capacity_gb'] = (total - used) / units.GiB
stats['total_capacity_gb'] = total / units.Gi
stats['free_capacity_gb'] = (total - used) / units.Gi
except processutils.ProcessExecutionError:
LOG.exception(_('error refreshing volume stats'))
@ -189,7 +189,7 @@ class SheepdogDriver(driver.VolumeDriver):
old_size = volume['size']
try:
size = int(new_size) * units.GiB
size = int(new_size) * units.Gi
self._resize(volume, size=size)
except Exception:
msg = _('Failed to Extend Volume '

View File

@ -28,7 +28,7 @@ from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import units
from cinder.openstack.common import units
from cinder.volume.drivers.san.san import SanISCSIDriver
from cinder.volume import qos_specs
from cinder.volume import volume_types
@ -356,7 +356,7 @@ class SolidFireDriver(SanISCSIDriver):
params = {'volumeID': int(sf_vol['volumeID']),
'name': 'UUID-%s' % v_ref['id'],
'newSize': int(new_size * units.GiB),
'newSize': int(new_size * units.Gi),
'newAccountID': sfaccount['accountID']}
data = self._issue_api_request('CloneVolume', params)
@ -521,7 +521,7 @@ class SolidFireDriver(SanISCSIDriver):
params = {'name': 'UUID-%s' % volume['id'],
'accountID': None,
'sliceCount': slice_count,
'totalSize': int(volume['size'] * units.GiB),
'totalSize': int(volume['size'] * units.Gi),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
@ -647,7 +647,7 @@ class SolidFireDriver(SanISCSIDriver):
params = {
'volumeID': sf_vol['volumeID'],
'totalSize': int(new_size * units.GiB)
'totalSize': int(new_size * units.Gi)
}
data = self._issue_api_request('ModifyVolume',
params, version='5.0')
@ -682,9 +682,9 @@ class SolidFireDriver(SanISCSIDriver):
data["storage_protocol"] = 'iSCSI'
data['total_capacity_gb'] =\
float(results['maxProvisionedSpace'] / units.GiB)
float(results['maxProvisionedSpace'] / units.Gi)
data['free_capacity_gb'] = float(free_capacity / units.GiB)
data['free_capacity_gb'] = float(free_capacity / units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = True
data['compression_percent'] =\

View File

@ -30,7 +30,7 @@ from oslo.config import cfg
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder.volume import driver
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import error_util
@ -408,7 +408,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
"volume since policy based placement is "
"disabled."), storage_profile)
size_bytes = volume['size'] * units.GiB
size_bytes = volume['size'] * units.Gi
datastore_summary = self._select_datastore_summary(size_bytes,
datastores)
return (folder, datastore_summary)
@ -440,7 +440,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
resource_pool,
datastores)
disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
size_kb = volume['size'] * units.MiB
size_kb = volume['size'] * units.Mi
storage_profile = self._get_storage_profile(volume)
profileId = None
if self._storage_policy_enabled and storage_profile:
@ -814,7 +814,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
"streamOptimized"
"""
# Set volume size in GB from image metadata
volume['size'] = float(image_size) / units.GiB
volume['size'] = float(image_size) / units.Gi
# First create empty backing in the inventory
backing = self._create_backing_in_inventory(volume)
@ -1000,7 +1000,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
# image size. If the volume_size_in_gb is greater, meaning the
# user specifies a larger volume, we need to extend/resize the vmdk
# virtual disk to the capacity specified by the user.
if volume_size_in_gb * units.GiB > image_size_in_bytes:
if volume_size_in_gb * units.Gi > image_size_in_bytes:
self._extend_vmdk_virtual_disk(volume['name'], volume_size_in_gb)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
@ -1045,7 +1045,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
host=host_ip,
vm=backing,
vmdk_file_path=vmdk_file_path,
vmdk_size=volume['size'] * units.GiB,
vmdk_size=volume['size'] * units.Gi,
image_name=image_meta['name'],
image_version=1)
LOG.info(_("Done copying volume %(vol)s to a new image %(img)s") %

View File

@ -18,7 +18,7 @@ Implements operations on volumes residing on VMware datastores.
"""
from cinder.openstack.common import log as logging
from cinder import units
from cinder.openstack.common import units
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
@ -335,7 +335,7 @@ class VMwareVolumeOps(object):
# VMWare API needs the capacity unit to be in KB, so convert the
# capacity unit from GB to KB.
size_in_kb = requested_size_in_gb * units.MiB
size_in_kb = requested_size_in_gb * units.Mi
task = self._session.invoke_api(self._session.vim,
"ExtendVirtualDisk_Task",
diskMgr,

View File

@ -18,7 +18,7 @@ import contextlib
import os
import pickle
from cinder import units
from cinder.openstack.common import units
from cinder.volume.drivers.xenapi import tools
@ -258,7 +258,7 @@ class CompoundOperations(object):
def to_bytes(size_in_gigs):
return size_in_gigs * units.GiB
return size_in_gigs * units.Gi
class NFSOperationsMixIn(CompoundOperations):

View File

@ -20,9 +20,9 @@ from cinder import exception
from cinder import flow_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
from cinder import policy
from cinder import quota
from cinder import units
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import volume_types
@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
GB = units.GiB
GB = units.Gi
QUOTAS = quota.QUOTAS
# Only in these 'sources' status can we attempt to create a volume from a

View File

@ -24,8 +24,8 @@ from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import strutils
from cinder.openstack.common import units
from cinder import rpc
from cinder import units
from cinder import utils
@ -123,7 +123,7 @@ def _calculate_count(size_in_m, blocksize):
blocksize = CONF.volume_dd_blocksize
bs = strutils.string_to_bytes('%sB' % blocksize)
count = math.ceil(size_in_m * units.MiB / bs)
count = math.ceil(size_in_m * units.Mi / bs)
return blocksize, int(count)

View File

@ -30,6 +30,7 @@ module=service
module=sslutils
module=strutils
module=timeutils
module=units
module=uuidutils
module=versionutils