Sync module units from oslo

The units module landed in Nova first, then was accepted by oslo with
minor changes. This patch removes the Nova-specific unit module and its
test, replacing them with the units module from oslo.

Commit in oslo: 'Add unit constants' f9308560c36918cc

Change-Id: I6054c18a56f59d33a69f4714be19d34fbe9de335
Co-Authored-By: Sahid Orentino Ferdjaoui <sahid.ferdjaoui@cloudwatt.com>
Eric Guo 2013-12-30 21:04:23 +08:00 committed by Sahid Orentino Ferdjaoui
parent 35ffc74efc
commit 1a58ec19a3
32 changed files with 146 additions and 169 deletions
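
For reference, code that previously imported the Nova-local unit module now
imports units from nova.openstack.common; the constant names themselves are
unchanged. A minimal sketch of the pattern this diff applies throughout (the
flavor value below is illustrative, not taken from the patch):

    # Before this patch:
    #   from nova import unit
    #   root_bytes = instance['root_gb'] * unit.Gi
    # After syncing from oslo:
    from nova.openstack.common import units

    # Convert a flavor's root disk size (GB) to bytes with the oslo constant.
    root_gb = 20                      # illustrative value
    root_bytes = root_gb * units.Gi   # 20 * 1024 ** 3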

View File

@ -36,7 +36,7 @@ from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import unit
from nova.openstack.common import units
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
@ -181,7 +181,7 @@ def get_dev(address, port, iqn, lun):
def get_image_mb(image_path):
"""Get size of an image in Megabyte."""
mb = unit.Mi
mb = units.Mi
image_byte = os.path.getsize(image_path)
# round up size to MB
image_mb = int((image_byte + mb - 1) / mb)
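
The rounding above is integer ceiling division by units.Mi; for example, an
image one byte larger than 1 MiB rounds up to 2 MB, matching the test case
later in this patch (the size value here is illustrative):

    from nova.openstack.common import units

    image_byte = units.Mi + 1                        # one byte over 1 MiB
    image_mb = int((image_byte + units.Mi - 1) / units.Mi)
    assert image_mb == 2                             # rounded up, as in get_image_mb()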

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
@ -19,7 +17,7 @@
Unit constants
"""
#Byte unit constant.
#Binary unit constants.
Ki = 1024
Mi = 1024 ** 2
Gi = 1024 ** 3
@ -28,3 +26,13 @@ Pi = 1024 ** 5
Ei = 1024 ** 6
Zi = 1024 ** 7
Yi = 1024 ** 8
#Decimal unit constants.
k = 1000
M = 1000 ** 2
G = 1000 ** 3
T = 1000 ** 4
P = 1000 ** 5
E = 1000 ** 6
Z = 1000 ** 7
Y = 1000 ** 8
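
The synced module carries both binary (powers of 1024) and decimal (powers of
1000) constants; a small sketch of the difference:

    from nova.openstack.common import units

    print(units.Gi)        # 1073741824 (1024 ** 3, binary gibibyte)
    print(units.G)         # 1000000000 (1000 ** 3, decimal gigabyte)
    print(10 * units.Gi)   # a 10 GiB disk size expressed in bytes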

View File

@ -1,31 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova import unit
class UnitTest(test.NoDBTestCase):
def test_byte_unit(self):
self.assertEqual(unit.Ki, 1024)
self.assertEqual(unit.Mi, 1024 ** 2)
self.assertEqual(unit.Gi, 1024 ** 3)
self.assertEqual(unit.Ti, 1024 ** 4)
self.assertEqual(unit.Pi, 1024 ** 5)
self.assertEqual(unit.Ei, 1024 ** 6)
self.assertEqual(unit.Zi, 1024 ** 7)
self.assertEqual(unit.Yi, 1024 ** 8)

View File

@ -25,9 +25,9 @@ import mox
from nova.cmd import baremetal_deploy_helper as bmdh
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova import test
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova import unit
from nova.virt.baremetal import db as bm_db
bmdh.LOG = logging.getLogger('nova.virt.baremetal.deploy_helper')
@ -374,7 +374,7 @@ class OtherFunctionTestCase(test.NoDBTestCase):
self.assertEqual(bmdh.get_image_mb('x'), 0)
size = 1
self.assertEqual(bmdh.get_image_mb('x'), 1)
size = unit.Mi
size = units.Mi
self.assertEqual(bmdh.get_image_mb('x'), 1)
size = unit.Mi + 1
size = units.Mi + 1
self.assertEqual(bmdh.get_image_mb('x'), 2)

View File

@ -23,11 +23,11 @@ import mock
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import units
from nova import test
from nova.tests import utils
import nova.tests.virt.docker.mock_client
from nova.tests.virt.test_virt_drivers import _VirtDriverTestCase
from nova import unit
from nova.virt.docker import hostinfo
@ -88,14 +88,14 @@ class DockerDriverTestCase(_VirtDriverTestCase, test.TestCase):
def test_get_available_resource(self):
memory = {
'total': 4 * unit.Mi,
'free': 3 * unit.Mi,
'used': 1 * unit.Mi
'total': 4 * units.Mi,
'free': 3 * units.Mi,
'used': 1 * units.Mi
}
disk = {
'total': 50 * unit.Gi,
'available': 25 * unit.Gi,
'used': 25 * unit.Gi
'total': 50 * units.Gi,
'available': 25 * units.Gi,
'used': 25 * units.Gi
}
# create the mocks
with contextlib.nested(
@ -196,9 +196,9 @@ class DockerDriverTestCase(_VirtDriverTestCase, test.TestCase):
def test_get_memory_limit_from_sys_meta_in_object(self):
instance = utils.get_test_instance(obj=True)
limit = self.connection._get_memory_limit_bytes(instance)
self.assertEqual(2048 * unit.Mi, limit)
self.assertEqual(2048 * units.Mi, limit)
def test_get_memory_limit_from_sys_meta_in_db_instance(self):
instance = utils.get_test_instance(obj=False)
limit = self.connection._get_memory_limit_bytes(instance)
self.assertEqual(2048 * unit.Mi, limit)
self.assertEqual(2048 * units.Mi, limit)

View File

@ -37,13 +37,13 @@ from nova import db
from nova import exception
from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import units
from nova import test
from nova.tests import fake_network
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova.tests.virt.hyperv import db_fakes
from nova.tests.virt.hyperv import fake
from nova import unit
from nova import utils
from nova.virt import configdrive
from nova.virt import driver
@ -279,12 +279,12 @@ class HyperVAPITestCase(test.NoDBTestCase):
self.assertEqual(dic['vcpus'], cpu_info['NumberOfLogicalProcessors'])
self.assertEqual(dic['hypervisor_hostname'], platform.node())
self.assertEqual(dic['memory_mb'], tot_mem_kb / unit.Ki)
self.assertEqual(dic['memory_mb'], tot_mem_kb / units.Ki)
self.assertEqual(dic['memory_mb_used'],
tot_mem_kb / unit.Ki - free_mem_kb / unit.Ki)
self.assertEqual(dic['local_gb'], tot_hdd_b / unit.Gi)
tot_mem_kb / units.Ki - free_mem_kb / units.Ki)
self.assertEqual(dic['local_gb'], tot_hdd_b / units.Gi)
self.assertEqual(dic['local_gb_used'],
tot_hdd_b / unit.Gi - free_hdd_b / unit.Gi)
tot_hdd_b / units.Gi - free_hdd_b / units.Gi)
self.assertEqual(dic['hypervisor_version'],
windows_version.replace('.', ''))
self.assertEqual(dic['supported_instances'],

View File

@ -23,11 +23,11 @@ from oslo.config import cfg
import inspect
from nova import exception
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_processutils
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova import unit
from nova.virt.libvirt import imagebackend
CONF = cfg.CONF
@ -242,13 +242,13 @@ class RawTestCase(_ImageTestCase, test.NoDBTestCase):
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = unit.Gi
SIZE = units.Gi
def setUp(self):
self.image_class = imagebackend.Qcow2
super(Qcow2TestCase, self).setUp()
self.QCOW2_BASE = (self.TEMPLATE_PATH +
'_%d' % (self.SIZE / unit.Gi))
'_%d' % (self.SIZE / units.Gi))
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()

View File

@ -49,6 +49,7 @@ from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova.pci import pci_manager
from nova import test
@ -57,7 +58,6 @@ import nova.tests.image.fake
from nova.tests import matchers
from nova.tests.objects import test_pci_device
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova import unit
from nova import utils
from nova import version
from nova.virt.disk import api as disk
@ -777,7 +777,7 @@ class LibvirtConnTestCase(test.TestCase):
None, disk_info)
self.assertEqual(cfg.acpi, True)
self.assertEqual(cfg.apic, True)
self.assertEqual(cfg.memory, 2 * unit.Mi)
self.assertEqual(cfg.memory, 2 * units.Mi)
self.assertEqual(cfg.vcpus, 1)
self.assertEqual(cfg.os_type, vm_mode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
@ -840,7 +840,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 2),
None, disk_info)
self.assertEqual(cfg.acpi, True)
self.assertEqual(cfg.memory, 2 * unit.Mi)
self.assertEqual(cfg.memory, 2 * units.Mi)
self.assertEqual(cfg.vcpus, 1)
self.assertEqual(cfg.os_type, vm_mode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
@ -900,7 +900,7 @@ class LibvirtConnTestCase(test.TestCase):
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, block_device_info)
self.assertEqual(cfg.acpi, False)
self.assertEqual(cfg.memory, 2 * unit.Mi)
self.assertEqual(cfg.memory, 2 * units.Mi)
self.assertEqual(cfg.vcpus, 1)
self.assertEqual(cfg.os_type, "uml")
self.assertEqual(cfg.os_boot_dev, [])
@ -3509,8 +3509,8 @@ class LibvirtConnTestCase(test.TestCase):
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * unit.Gi
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * unit.Gi
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
self.mox.StubOutWithMock(os.path, "getsize")
@ -3602,8 +3602,8 @@ class LibvirtConnTestCase(test.TestCase):
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * unit.Gi
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * unit.Gi
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
self.mox.StubOutWithMock(os.path, "getsize")
@ -3929,9 +3929,9 @@ class LibvirtConnTestCase(test.TestCase):
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * unit.Gi},
'size': 10 * units.Gi},
{'filename': filename,
'size': 20 * unit.Gi},
'size': 20 * units.Gi},
]
self.assertEqual(gotFiles, wantFiles)
@ -4011,11 +4011,11 @@ class LibvirtConnTestCase(test.TestCase):
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * unit.Gi},
'size': 10 * units.Gi},
{'filename': 'ephemeral_20_default',
'size': 20 * unit.Gi},
'size': 20 * units.Gi},
{'filename': 'swap_500',
'size': 500 * unit.Mi},
'size': 500 * units.Mi},
]
self.assertEqual(gotFiles, wantFiles)

View File

@ -16,9 +16,9 @@
from lxml import etree
from nova.openstack.common import units
from nova import test
from nova.tests import matchers
from nova import unit
from nova.virt.libvirt import config
@ -959,7 +959,7 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
def test_config_lxc(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "lxc"
obj.memory = 100 * unit.Mi
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.cpuset = "0-3,^2,4-5"
obj.name = "demo"
@ -995,7 +995,7 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
def test_config_xen_pv(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "xen"
obj.memory = 100 * unit.Mi
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.cpuset = "0-3,^2,4-5"
obj.name = "demo"
@ -1039,7 +1039,7 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
def test_config_xen_hvm(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "xen"
obj.memory = 100 * unit.Mi
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.cpuset = "0-3,^2,4-5"
obj.name = "demo"
@ -1087,7 +1087,7 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
def test_config_kvm(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "kvm"
obj.memory = 100 * unit.Mi
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.cpuset = "0-3,^2,4-5"
obj.cpu_shares = 100

View File

@ -37,6 +37,7 @@ from nova import context
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import test
import nova.tests.image.fake
@ -44,7 +45,6 @@ from nova.tests import matchers
from nova.tests import utils
from nova.tests.virt.vmwareapi import db_fakes
from nova.tests.virt.vmwareapi import stubs
from nova import unit
from nova import utils as nova_utils
from nova.virt import driver as v_driver
from nova.virt import fake
@ -429,7 +429,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
def test_spawn_disk_extend(self):
self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
requested_size = 80 * unit.Mi
requested_size = 80 * units.Mi
self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
requested_size, mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
@ -447,7 +447,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(result)
self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
requested_size = 80 * unit.Mi
requested_size = 80 * units.Mi
self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
requested_size, mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
@ -492,7 +492,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
def test_spawn_disk_invalid_disk_size(self):
self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties')
result = [82 * unit.Gi,
result = [82 * units.Gi,
{"vmware_ostype": "otherGuest",
"vmware_adaptertype": "lsiLogic",
"vmware_disktype": "sparse"}]

View File

@ -21,9 +21,9 @@ import re
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import test
from nova import unit
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
@ -58,8 +58,8 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
fake_session(fake_objects))
self.assertEqual(result[1], "fake-ds")
self.assertEqual(result[2], unit.Ti)
self.assertEqual(result[3], 500 * unit.Gi)
self.assertEqual(result[2], units.Ti)
self.assertEqual(result[3], 500 * units.Gi)
def test_get_datastore_ref_and_name_with_regex(self):
# Test with a regex that matches with a datastore

View File

@ -17,8 +17,8 @@
import math
from nova.openstack.common import units
from nova.tests.virt.xenapi import stubs
from nova import unit
from nova.virt import fake
from nova.virt import xenapi
from nova.virt.xenapi import driver as xenapi_driver
@ -28,10 +28,10 @@ class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for Driver operations."""
def host_stats(self, refresh=True):
return {'host_memory_total': 3 * unit.Mi,
'host_memory_free_computed': 2 * unit.Mi,
'disk_total': 4 * unit.Gi,
'disk_used': 5 * unit.Gi,
return {'host_memory_total': 3 * units.Mi,
'host_memory_free_computed': 2 * units.Mi,
'disk_total': 4 * units.Gi,
'disk_used': 5 * units.Gi,
'host_hostname': 'somename',
'supported_instances': 'x86_64',
'host_cpu_info': {'cpu_count': 50}}

View File

@ -31,10 +31,10 @@ from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import processutils
from nova.openstack.common import timeutils
from nova.openstack.common import units
from nova import test
from nova.tests.virt.xenapi import stubs
from nova.tests.virt.xenapi import test_xenapi
from nova import unit
from nova import utils
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
@ -160,7 +160,7 @@ class GenerateConfigDriveTestCase(VMUtilsTestBase):
self.mox.StubOutWithMock(vm_utils, 'create_vdi')
vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
'configdrive',
64 * unit.Mi).AndReturn('vdi_ref')
64 * units.Mi).AndReturn('vdi_ref')
self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
vm_utils.vdi_attached_here(
@ -1071,7 +1071,7 @@ class GenerateDiskTestCase(VMUtilsTestBase):
def _check_vdi(self, vdi_ref, check_attached=True):
vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
self.assertEqual(str(10 * unit.Mi), vdi_rec["virtual_size"])
self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
if check_attached:
vbd_ref = vdi_rec["VBDs"][0]
vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
@ -1194,7 +1194,7 @@ class GenerateEphemeralTestCase(VMUtilsTestBase):
vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral',
unit.Mi, None).AndRaise(exception.NovaException)
units.Mi, None).AndRaise(exception.NovaException)
vm_utils.safe_destroy_vdis(self.session, [4, 5])

View File

@ -27,7 +27,7 @@ from nova import exception
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova.openstack.common import units
from nova import utils
from nova import version
@ -56,7 +56,7 @@ CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
# Config drives are 64mb, if we can't size to the exact size of the data
CONFIGDRIVESIZE_BYTES = 64 * unit.Mi
CONFIGDRIVESIZE_BYTES = 64 * units.Mi
class ConfigDriveBuilder(object):

View File

@ -34,7 +34,7 @@ from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log
from nova import unit
from nova.openstack.common import units
from nova import utils
import nova.virt.docker.client
from nova.virt.docker import hostinfo
@ -151,11 +151,11 @@ class DockerDriver(driver.ComputeDriver):
stats = {
'vcpus': 1,
'vcpus_used': 0,
'memory_mb': memory['total'] / unit.Mi,
'memory_mb_used': memory['used'] / unit.Mi,
'local_gb': disk['total'] / unit.Gi,
'local_gb_used': disk['used'] / unit.Gi,
'disk_available_least': disk['available'] / unit.Gi,
'memory_mb': memory['total'] / units.Mi,
'memory_mb_used': memory['used'] / units.Mi,
'local_gb': disk['total'] / units.Gi,
'local_gb_used': disk['used'] / units.Gi,
'disk_available_least': disk['available'] / units.Gi,
'hypervisor_type': 'docker',
'hypervisor_version': utils.convert_version_to_int('1.0'),
'hypervisor_hostname': self._nodename,
@ -251,7 +251,7 @@ class DockerDriver(driver.ComputeDriver):
def _get_memory_limit_bytes(self, instance):
system_meta = utils.instance_sys_meta(instance)
return int(system_meta.get('instance_type_memory_mb', 0)) * unit.Mi
return int(system_meta.get('instance_type_memory_mb', 0)) * units.Mi
def _get_image_name(self, context, instance, image):
fmt = image['container_format']

View File

@ -26,7 +26,7 @@ from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import unit
from nova.openstack.common import units
from nova.virt.hyperv import constants
from nova.virt.hyperv import utilsfactory
@ -81,8 +81,8 @@ class HostOps(object):
drive = os.path.splitdrive(self._pathutils.get_instances_dir())[0]
(size, free_space) = self._hostutils.get_volume_info(drive)
total_gb = size / unit.Gi
free_gb = free_space / unit.Gi
total_gb = size / units.Gi
free_gb = free_space / units.Gi
used_gb = total_gb - free_gb
return (total_gb, free_gb, used_gb)

View File

@ -25,7 +25,7 @@ from nova.compute import flavors
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova.openstack.common import units
from nova import utils
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vhdutilsv2
@ -65,7 +65,7 @@ class ImageCache(object):
vhd_size = vhd_info['MaxInternalSize']
root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
root_vhd_size = root_vhd_size_gb * unit.Gi
root_vhd_size = root_vhd_size_gb * units.Gi
# NOTE(lpetrut): Checking the namespace is needed as the following
# method is not yet implemented in the vhdutilsv2 module.

View File

@ -23,7 +23,7 @@ import os
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova.openstack.common import units
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
@ -253,12 +253,12 @@ class MigrationOps(object):
src_base_disk_path)
if resize_instance:
new_size = instance['root_gb'] * unit.Gi
new_size = instance['root_gb'] * units.Gi
self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
if resize_instance:
new_size = instance.get('ephemeral_gb', 0) * unit.Gi
new_size = instance.get('ephemeral_gb', 0) * units.Gi
if not eph_vhd_path:
if new_size:
eph_vhd_path = self._vmops.create_ephemeral_vhd(instance)

View File

@ -31,7 +31,7 @@ from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import unit
from nova.openstack.common import units
from nova import utils
from nova.virt import configdrive
from nova.virt.hyperv import constants
@ -160,7 +160,7 @@ class VMOps(object):
base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
base_vhd_size = base_vhd_info['MaxInternalSize']
root_vhd_size = instance['root_gb'] * unit.Gi
root_vhd_size = instance['root_gb'] * units.Gi
# NOTE(lpetrut): Checking the namespace is needed as the
# following method is not yet implemented in vhdutilsv2.
@ -192,7 +192,7 @@ class VMOps(object):
return root_vhd_path
def create_ephemeral_vhd(self, instance):
eph_vhd_size = instance.get('ephemeral_gb', 0) * unit.Gi
eph_vhd_size = instance.get('ephemeral_gb', 0) * units.Gi
if eph_vhd_size:
vhd_format = self._vhdutils.get_best_supported_vhd_format()

View File

@ -28,7 +28,7 @@ helpers for populating up config object instances.
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova.openstack.common import units
from lxml import etree
@ -1073,7 +1073,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.virt_type = None
self.uuid = None
self.name = None
self.memory = 500 * unit.Mi
self.memory = 500 * units.Mi
self.vcpus = 1
self.cpuset = None
self.cpu = None

View File

@ -83,11 +83,11 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova.openstack.common import xmlutils
from nova.pci import pci_manager
from nova.pci import pci_utils
from nova.pci import pci_whitelist
from nova import unit
from nova import utils
from nova import version
from nova.virt import configdrive
@ -266,7 +266,7 @@ DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 100 * unit.Ki
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
@ -2471,7 +2471,7 @@ class LibvirtDriver(driver.ComputeDriver):
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * unit.Gi
size = instance['root_gb'] * units.Gi
if size == 0 or suffix == '.rescue':
size = None
@ -2496,7 +2496,7 @@ class LibvirtDriver(driver.ComputeDriver):
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * unit.Gi
size = ephemeral_gb * units.Gi
disk_image.cache(fetch_func=fn,
filename=fname,
size=size,
@ -2509,7 +2509,7 @@ class LibvirtDriver(driver.ComputeDriver):
fs_label='ephemeral%d' % idx,
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
size = eph['size'] * unit.Gi
size = eph['size'] * units.Gi
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
disk_image.cache(
fetch_func=fn,
@ -2530,7 +2530,7 @@ class LibvirtDriver(driver.ComputeDriver):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * unit.Mi
size = swap_mb * units.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
@ -3572,7 +3572,7 @@ class LibvirtDriver(driver.ComputeDriver):
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / unit.Gi
info[k] = v / units.Gi
return info
@ -4046,7 +4046,7 @@ class LibvirtDriver(driver.ComputeDriver):
available = 0
if available_mb:
available = available_mb * unit.Mi
available = available_mb * units.Mi
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
@ -4715,7 +4715,7 @@ class LibvirtDriver(driver.ComputeDriver):
size = instance['ephemeral_gb']
else:
size = 0
size *= unit.Gi
size *= units.Gi
# If we have a non partitioned image that we can extend
# then ensure we're in 'raw' format so we can extend file system.
@ -4988,8 +4988,8 @@ class HostState(object):
disk_over_committed = (self.driver.
get_disk_over_committed_size_total())
# Disk available least size
available_least = disk_free_gb * unit.Gi - disk_over_committed
return (available_least / unit.Gi)
available_least = disk_free_gb * units.Gi - disk_over_committed
return (available_least / units.Gi)
LOG.debug(_("Updating host stats"))
disk_info_dict = self.driver.get_local_gb_info()

View File

@ -29,7 +29,7 @@ from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import unit
from nova.openstack.common import units
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
@ -323,7 +323,7 @@ class Qcow2(Image):
backing_parts[-1].isdigit():
legacy_backing_size = int(backing_parts[-1])
legacy_base += '_%d' % legacy_backing_size
legacy_backing_size *= unit.Gi
legacy_backing_size *= units.Gi
# Create the legacy backing file if necessary.
if legacy_backing_size:

View File

@ -30,7 +30,7 @@ from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import unit
from nova.openstack.common import units
from nova import utils
from nova.virt import images
@ -240,7 +240,7 @@ def create_lvm_image(vg, lv, size, sparse=False):
'lv': lv})
if sparse:
preallocated_space = 64 * unit.Mi
preallocated_space = 64 * units.Mi
check_size(vg, lv, preallocated_space)
if free_space < size:
LOG.warning(_('Volume group %(vg)s will not be able'
@ -357,7 +357,7 @@ def clear_logical_volume(path):
# for more or less security conscious setups.
vol_size = logical_volume_size(path)
bs = unit.Mi
bs = units.Mi
direct_flags = ('oflag=direct',)
sync_flags = ()
remaining_bytes = vol_size

View File

@ -28,8 +28,8 @@ from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import unit
from nova.virt.vmwareapi import error_util
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
@ -429,8 +429,8 @@ class ResourcePool(ManagedObject):
memoryAllocation = DataObject()
cpuAllocation = DataObject()
memory.maxUsage = 1000 * unit.Mi
memory.overallUsage = 500 * unit.Mi
memory.maxUsage = 1000 * units.Mi
memory.overallUsage = 500 * units.Mi
cpu.maxUsage = 10000
cpu.overallUsage = 1000
runtime.cpu = cpu
@ -527,7 +527,7 @@ class ClusterComputeResource(ManagedObject):
summary.numCpuCores += host_summary.hardware.numCpuCores
summary.numCpuThreads += host_summary.hardware.numCpuThreads
summary.totalMemory += host_summary.hardware.memorySize
free_memory = (host_summary.hardware.memorySize / unit.Mi
free_memory = (host_summary.hardware.memorySize / units.Mi
- host_summary.quickStats.overallMemoryUsage)
summary.effectiveMemory += free_memory if connected else 0
summary.numEffectiveHosts += 1 if connected else 0
@ -541,8 +541,8 @@ class Datastore(ManagedObject):
super(Datastore, self).__init__("ds")
self.set("summary.type", "VMFS")
self.set("summary.name", name)
self.set("summary.capacity", capacity * unit.Gi)
self.set("summary.freeSpace", free * unit.Gi)
self.set("summary.capacity", capacity * units.Gi)
self.set("summary.freeSpace", free * units.Gi)
self.set("summary.accessible", True)
self.set("browser", "")
@ -594,7 +594,7 @@ class HostSystem(ManagedObject):
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
hardware.uuid = "host-uuid"
hardware.memorySize = unit.Gi
hardware.memorySize = units.Gi
summary.hardware = hardware
quickstats = DataObject()

View File

@ -21,7 +21,7 @@ Management class for host-related functions (start, reboot, etc).
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova.openstack.common import units
from nova import utils
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@ -128,10 +128,10 @@ class HostState(object):
"sockets": summary.hardware.numCpuPkgs,
"threads": summary.hardware.numCpuThreads}
}
data["disk_total"] = ds[2] / unit.Gi
data["disk_available"] = ds[3] / unit.Gi
data["disk_total"] = ds[2] / units.Gi
data["disk_available"] = ds[3] / units.Gi
data["disk_used"] = data["disk_total"] - data["disk_available"]
data["host_memory_total"] = summary.hardware.memorySize / unit.Mi
data["host_memory_total"] = summary.hardware.memorySize / units.Mi
data["host_memory_free"] = data["host_memory_total"] - \
summary.quickStats.overallMemoryUsage
data["hypervisor_type"] = summary.config.product.name
@ -183,8 +183,8 @@ class VCState(object):
"model": stats['cpu']['model'],
"topology": {"cores": stats['cpu']['cores'],
"threads": stats['cpu']['vcpus']}}
data["disk_total"] = ds[2] / unit.Gi
data["disk_available"] = ds[3] / unit.Gi
data["disk_total"] = ds[2] / units.Gi
data["disk_available"] = ds[3] / units.Gi
data["disk_used"] = data["disk_total"] - data["disk_available"]
data["host_memory_total"] = stats['mem']['total']
data["host_memory_free"] = stats['mem']['free']

View File

@ -26,7 +26,7 @@ import copy
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova.openstack.common import units
from nova.virt.vmwareapi import vim_util
LOG = logging.getLogger(__name__)
@ -805,9 +805,9 @@ def get_stats_from_cluster(session, cluster):
res_mor, "ResourcePool", "summary.runtime.memory")
if res_usage:
# maxUsage is the memory limit of the cluster available to VM's
mem_info['total'] = int(res_usage.maxUsage / unit.Mi)
mem_info['total'] = int(res_usage.maxUsage / units.Mi)
# overallUsage is the hypervisor's view of memory usage by VM's
consumed = int(res_usage.overallUsage / unit.Mi)
consumed = int(res_usage.overallUsage / units.Mi)
mem_info['free'] = mem_info['total'] - consumed
stats = {'cpu': cpu_info, 'mem': mem_info}
return stats

View File

@ -38,8 +38,8 @@ from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import unit
from nova import utils
from nova.virt import configdrive
from nova.virt import driver
@ -247,7 +247,7 @@ class VMwareVMOps(object):
vif_model, image_linked_clone)
root_gb = instance['root_gb']
root_gb_in_kb = root_gb * unit.Mi
root_gb_in_kb = root_gb * units.Mi
(vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model,
image_linked_clone) = _get_image_properties(root_gb_in_kb)

View File

@ -43,7 +43,7 @@ from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import unit
from nova.openstack.common import units
from nova import utils
from nova.virt import driver
from nova.virt.xenapi.client import session
@ -456,12 +456,12 @@ class XenAPIDriver(driver.ComputeDriver):
host_stats = self.get_host_stats(refresh=True)
# Updating host information
total_ram_mb = host_stats['host_memory_total'] / unit.Mi
total_ram_mb = host_stats['host_memory_total'] / units.Mi
# NOTE(belliott) memory-free-computed is a value provided by XenServer
# for gauging free memory more conservatively than memory-free.
free_ram_mb = host_stats['host_memory_free_computed'] / unit.Mi
total_disk_gb = host_stats['disk_total'] / unit.Gi
used_disk_gb = host_stats['disk_used'] / unit.Gi
free_ram_mb = host_stats['host_memory_free_computed'] / units.Mi
total_disk_gb = host_stats['disk_total'] / units.Gi
used_disk_gb = host_stats['disk_used'] / units.Gi
hyper_ver = utils.convert_version_to_int(self._session.product_version)
dic = {'vcpus': 0,
'memory_mb': total_ram_mb,

View File

@ -64,7 +64,7 @@ from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import unit
from nova.openstack.common import units
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
@ -234,8 +234,8 @@ def after_VBD_create(vbd_ref, vbd_rec):
def after_VM_create(vm_ref, vm_rec):
"""Create read-only fields in the VM record."""
vm_rec.setdefault('is_control_domain', False)
vm_rec.setdefault('memory_static_max', str(8 * unit.Gi))
vm_rec.setdefault('memory_dynamic_max', str(8 * unit.Gi))
vm_rec.setdefault('memory_static_max', str(8 * units.Gi))
vm_rec.setdefault('memory_dynamic_max', str(8 * units.Gi))
vm_rec.setdefault('VCPUs_max', str(4))
vm_rec.setdefault('VBDs', [])
vm_rec.setdefault('resident_on', '')
@ -608,7 +608,7 @@ class SessionBase(object):
def host_compute_free_memory(self, _1, ref):
#Always return 12GB available
return 12 * unit.Gi
return 12 * units.Gi
def _plugin_agent_version(self, method, args):
return as_json(returncode='0', message='1.0\\r\\n')
@ -691,7 +691,7 @@ class SessionBase(object):
return func(method, args)
def VDI_get_virtual_size(self, *args):
return 1 * unit.Gi
return 1 * units.Gi
def VDI_resize_online(self, *args):
return 'derp'

View File

@ -47,9 +47,9 @@ from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import units
from nova.openstack.common import versionutils
from nova.openstack.common import xmlutils
from nova import unit
from nova import utils
from nova.virt import configdrive
from nova.virt import cpu
@ -88,7 +88,7 @@ xenapi_vm_utils_opts = [
deprecated_group='DEFAULT',
help='Time to wait for a block device to be created'),
cfg.IntOpt('max_kernel_ramdisk_size',
default=16 * unit.Mi,
default=16 * units.Mi,
deprecated_name='max_kernel_ramdisk_size',
deprecated_group='DEFAULT',
help='Maximum size in bytes of kernel or ramdisk images'),
@ -258,7 +258,7 @@ def create_vm(session, instance, name_label, kernel, ramdisk,
3. Using hardware virtualization
"""
flavor = flavors.extract_flavor(instance)
mem = str(long(flavor['memory_mb']) * unit.Mi)
mem = str(long(flavor['memory_mb']) * units.Mi)
vcpus = str(flavor['vcpus'])
vcpu_weight = flavor['vcpu_weight']
@ -386,7 +386,7 @@ def is_vm_shutdown(session, vm_ref):
def is_enough_free_mem(session, instance):
flavor = flavors.extract_flavor(instance)
mem = long(flavor['memory_mb']) * unit.Mi
mem = long(flavor['memory_mb']) * units.Mi
host_free_mem = long(session.call_xenapi("host.compute_free_memory",
session.host_ref))
return host_free_mem >= mem
@ -938,7 +938,7 @@ def _vdi_resize(session, vdi_ref, new_size):
def update_vdi_virtual_size(session, instance, vdi_ref, new_gb):
virtual_size = _vdi_get_virtual_size(session, vdi_ref)
new_disk_size = new_gb * unit.Gi
new_disk_size = new_gb * units.Gi
msg = _("Resizing up VDI %(vdi_ref)s from %(virtual_size)d "
"to %(new_disk_size)d")
@ -977,7 +977,7 @@ def resize_disk(session, instance, vdi_ref, flavor):
_auto_configure_disk(session, clone_ref, size_gb)
# Create new VDI
vdi_size = size_gb * unit.Gi
vdi_size = size_gb * units.Gi
# NOTE(johannes): No resizing allowed for rescue instances, so
# using instance['name'] is safe here
new_ref = create_vdi(session, sr_ref, instance, instance['name'],
@ -986,7 +986,7 @@ def resize_disk(session, instance, vdi_ref, flavor):
new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
# Manually copy contents over
virtual_size = size_gb * unit.Gi
virtual_size = size_gb * units.Gi
_copy_partition(session, clone_ref, new_ref, 1, virtual_size)
return new_ref, new_uuid
@ -1023,7 +1023,7 @@ def _auto_configure_disk(session, vdi_ref, new_gb):
_num, start, old_sectors, ptype = partitions[0]
if ptype in ('ext3', 'ext4'):
new_sectors = new_gb * unit.Gi / SECTOR_SIZE
new_sectors = new_gb * units.Gi / SECTOR_SIZE
_resize_part_and_fs(dev, start, old_sectors, new_sectors)
else:
reason = _('Disk contains a filesystem '
@ -1086,7 +1086,7 @@ def _generate_disk(session, instance, vm_ref, userdevice, name_label,
"""
# 1. Create VDI
sr_ref = safe_find_sr(session)
ONE_MEG = unit.Mi
ONE_MEG = units.Mi
virtual_size = size_mb * ONE_MEG
vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type,
virtual_size)
@ -1515,8 +1515,7 @@ def _get_vdi_chain_size(session, vdi_uuid):
def _check_vdi_size(context, session, instance, vdi_uuid):
flavor = flavors.extract_flavor(instance)
allowed_size = (flavor['root_gb'] +
VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * unit.Gi
VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * units.Gi
if not flavor['root_gb']:
# root_gb=0 indicates that we're disabling size checks
return

View File

@ -45,7 +45,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova import unit
from nova.openstack.common import units
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
@ -2055,7 +2055,7 @@ class VMOps(object):
uuid = _get_uuid(vm_rec)
if _is_active(vm_rec) and uuid is not None:
memory_mb = int(vm_rec['memory_static_max']) / unit.Mi
memory_mb = int(vm_rec['memory_static_max']) / units.Mi
usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
return usage

View File

@ -34,6 +34,7 @@ module=sslutils
module=strutils
module=threadgroup
module=timeutils
module=units
module=uuidutils
module=versionutils
module=xmlutils