Moves Hyper-V options to the hyperv section
Fixes Bug: 1140778 Due to the large number of options available in Nova, in order to achieve better option organization and naming (e.g. avoid the hyperv_* prefix) and to avoid option name clashes, options specific to the Hyper-V driver are moved to a section named "hyperv". Backwards compatibility is maintained by providing the "deprecated_group" argument in option definitions. For case consistency the group name has been changed to "hyperv" for options introduced in Grizzly belonging to the group "HYPERV" Change-Id: If2710822cdf6e4ae108a6d1e7977735ac8104a2e
This commit is contained in:
@@ -56,7 +56,7 @@ from nova.virt.hyperv import volumeutilsv2
|
|||||||
from nova.virt import images
|
from nova.virt import images
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif')
|
CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif', 'hyperv')
|
||||||
|
|
||||||
|
|
||||||
class HyperVAPITestCase(test.TestCase):
|
class HyperVAPITestCase(test.TestCase):
|
||||||
@@ -86,9 +86,11 @@ class HyperVAPITestCase(test.TestCase):
|
|||||||
self._setup_stubs()
|
self._setup_stubs()
|
||||||
|
|
||||||
self.flags(instances_path=r'C:\Hyper-V\test\instances',
|
self.flags(instances_path=r'C:\Hyper-V\test\instances',
|
||||||
vswitch_name='external',
|
network_api_class='nova.network.quantumv2.api.API')
|
||||||
network_api_class='nova.network.quantumv2.api.API',
|
|
||||||
force_volumeutils_v1=True)
|
self.flags(vswitch_name='external',
|
||||||
|
force_volumeutils_v1=True,
|
||||||
|
group='hyperv')
|
||||||
|
|
||||||
self._conn = driver_hyperv.HyperVDriver(None)
|
self._conn = driver_hyperv.HyperVDriver(None)
|
||||||
|
|
||||||
@@ -331,7 +333,7 @@ class HyperVAPITestCase(test.TestCase):
|
|||||||
cdb.__exit__(None, None, None).AndReturn(None)
|
cdb.__exit__(None, None, None).AndReturn(None)
|
||||||
|
|
||||||
if not use_cdrom:
|
if not use_cdrom:
|
||||||
utils.execute(CONF.qemu_img_cmd,
|
utils.execute(CONF.hyperv.qemu_img_cmd,
|
||||||
'convert',
|
'convert',
|
||||||
'-f',
|
'-f',
|
||||||
'raw',
|
'raw',
|
||||||
@@ -351,7 +353,7 @@ class HyperVAPITestCase(test.TestCase):
|
|||||||
|
|
||||||
def _test_spawn_config_drive(self, use_cdrom):
|
def _test_spawn_config_drive(self, use_cdrom):
|
||||||
self.flags(force_config_drive=True)
|
self.flags(force_config_drive=True)
|
||||||
self.flags(config_drive_cdrom=use_cdrom)
|
self.flags(config_drive_cdrom=use_cdrom, group='hyperv')
|
||||||
self.flags(mkisofs_cmd='mkisofs.exe')
|
self.flags(mkisofs_cmd='mkisofs.exe')
|
||||||
|
|
||||||
self._setup_spawn_config_drive_mocks(use_cdrom)
|
self._setup_spawn_config_drive_mocks(use_cdrom)
|
||||||
@@ -391,7 +393,7 @@ class HyperVAPITestCase(test.TestCase):
|
|||||||
fake_vswitch_port = 'fake port'
|
fake_vswitch_port = 'fake port'
|
||||||
|
|
||||||
m = networkutils.NetworkUtils.get_external_vswitch(
|
m = networkutils.NetworkUtils.get_external_vswitch(
|
||||||
CONF.vswitch_name)
|
CONF.hyperv.vswitch_name)
|
||||||
m.AndReturn(fake_vswitch_path)
|
m.AndReturn(fake_vswitch_path)
|
||||||
|
|
||||||
m = networkutils.NetworkUtils.create_vswitch_port(
|
m = networkutils.NetworkUtils.create_vswitch_port(
|
||||||
@@ -410,7 +412,7 @@ class HyperVAPITestCase(test.TestCase):
|
|||||||
|
|
||||||
def setup_vif_mocks():
|
def setup_vif_mocks():
|
||||||
m = networkutils.NetworkUtils.get_external_vswitch(
|
m = networkutils.NetworkUtils.get_external_vswitch(
|
||||||
CONF.vswitch_name)
|
CONF.hyperv.vswitch_name)
|
||||||
m.AndRaise(vmutils.HyperVException(_('fake vswitch not found')))
|
m.AndRaise(vmutils.HyperVException(_('fake vswitch not found')))
|
||||||
|
|
||||||
self.assertRaises(vmutils.HyperVException, self._test_spawn_instance,
|
self.assertRaises(vmutils.HyperVException, self._test_spawn_instance,
|
||||||
|
|||||||
@@ -35,7 +35,7 @@ hyperv_opts = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.register_opts(hyperv_opts, 'HYPERV')
|
CONF.register_opts(hyperv_opts, 'hyperv')
|
||||||
CONF.import_opt('instances_path', 'nova.compute.manager')
|
CONF.import_opt('instances_path', 'nova.compute.manager')
|
||||||
|
|
||||||
|
|
||||||
@@ -70,8 +70,8 @@ class PathUtils(object):
|
|||||||
local_instance_path = os.path.normpath(CONF.instances_path)
|
local_instance_path = os.path.normpath(CONF.instances_path)
|
||||||
|
|
||||||
if remote_server:
|
if remote_server:
|
||||||
if CONF.HYPERV.instances_path_share:
|
if CONF.hyperv.instances_path_share:
|
||||||
path = CONF.HYPERV.instances_path_share
|
path = CONF.hyperv.instances_path_share
|
||||||
else:
|
else:
|
||||||
# Use an administrative share
|
# Use an administrative share
|
||||||
path = local_instance_path.replace(':', '$')
|
path = local_instance_path.replace(':', '$')
|
||||||
|
|||||||
@@ -29,11 +29,12 @@ hyperv_opts = [
|
|||||||
default=None,
|
default=None,
|
||||||
help='External virtual switch Name, '
|
help='External virtual switch Name, '
|
||||||
'if not provided, the first external virtual '
|
'if not provided, the first external virtual '
|
||||||
'switch is used'),
|
'switch is used',
|
||||||
|
deprecated_group='DEFAULT'),
|
||||||
]
|
]
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.register_opts(hyperv_opts)
|
CONF.register_opts(hyperv_opts, 'hyperv')
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -69,7 +70,7 @@ class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver):
|
|||||||
|
|
||||||
def plug(self, instance, vif):
|
def plug(self, instance, vif):
|
||||||
vswitch_path = self._netutils.get_external_vswitch(
|
vswitch_path = self._netutils.get_external_vswitch(
|
||||||
CONF.vswitch_name)
|
CONF.hyperv.vswitch_name)
|
||||||
|
|
||||||
vm_name = instance['name']
|
vm_name = instance['name']
|
||||||
LOG.debug(_('Creating vswitch port for instance: %s') % vm_name)
|
LOG.debug(_('Creating vswitch port for instance: %s') % vm_name)
|
||||||
|
|||||||
@@ -44,22 +44,26 @@ hyperv_opts = [
|
|||||||
cfg.BoolOpt('limit_cpu_features',
|
cfg.BoolOpt('limit_cpu_features',
|
||||||
default=False,
|
default=False,
|
||||||
help='Required for live migration among '
|
help='Required for live migration among '
|
||||||
'hosts with different CPU features'),
|
'hosts with different CPU features',
|
||||||
|
deprecated_group='DEFAULT'),
|
||||||
cfg.BoolOpt('config_drive_inject_password',
|
cfg.BoolOpt('config_drive_inject_password',
|
||||||
default=False,
|
default=False,
|
||||||
help='Sets the admin password in the config drive image'),
|
help='Sets the admin password in the config drive image',
|
||||||
|
deprecated_group='DEFAULT'),
|
||||||
cfg.StrOpt('qemu_img_cmd',
|
cfg.StrOpt('qemu_img_cmd',
|
||||||
default="qemu-img.exe",
|
default="qemu-img.exe",
|
||||||
help='qemu-img is used to convert between '
|
help='qemu-img is used to convert between '
|
||||||
'different image types'),
|
'different image types',
|
||||||
|
deprecated_group='DEFAULT'),
|
||||||
cfg.BoolOpt('config_drive_cdrom',
|
cfg.BoolOpt('config_drive_cdrom',
|
||||||
default=False,
|
default=False,
|
||||||
help='Attaches the Config Drive image as a cdrom drive '
|
help='Attaches the Config Drive image as a cdrom drive '
|
||||||
'instead of a disk drive')
|
'instead of a disk drive',
|
||||||
|
deprecated_group='DEFAULT')
|
||||||
]
|
]
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.register_opts(hyperv_opts)
|
CONF.register_opts(hyperv_opts, 'hyperv')
|
||||||
CONF.import_opt('use_cow_images', 'nova.virt.driver')
|
CONF.import_opt('use_cow_images', 'nova.virt.driver')
|
||||||
CONF.import_opt('network_api_class', 'nova.network')
|
CONF.import_opt('network_api_class', 'nova.network')
|
||||||
|
|
||||||
@@ -160,7 +164,7 @@ class VMOps(object):
|
|||||||
self._vmutils.create_vm(instance_name,
|
self._vmutils.create_vm(instance_name,
|
||||||
instance['memory_mb'],
|
instance['memory_mb'],
|
||||||
instance['vcpus'],
|
instance['vcpus'],
|
||||||
CONF.limit_cpu_features)
|
CONF.hyperv.limit_cpu_features)
|
||||||
|
|
||||||
if boot_vhd_path:
|
if boot_vhd_path:
|
||||||
self._vmutils.attach_ide_drive(instance_name,
|
self._vmutils.attach_ide_drive(instance_name,
|
||||||
@@ -190,7 +194,7 @@ class VMOps(object):
|
|||||||
LOG.info(_('Using config drive for instance: %s'), instance=instance)
|
LOG.info(_('Using config drive for instance: %s'), instance=instance)
|
||||||
|
|
||||||
extra_md = {}
|
extra_md = {}
|
||||||
if admin_password and CONF.config_drive_inject_password:
|
if admin_password and CONF.hyperv.config_drive_inject_password:
|
||||||
extra_md['admin_pass'] = admin_password
|
extra_md['admin_pass'] = admin_password
|
||||||
|
|
||||||
inst_md = instance_metadata.InstanceMetadata(instance,
|
inst_md = instance_metadata.InstanceMetadata(instance,
|
||||||
@@ -211,11 +215,11 @@ class VMOps(object):
|
|||||||
LOG.error(_('Creating config drive failed with error: %s'),
|
LOG.error(_('Creating config drive failed with error: %s'),
|
||||||
e, instance=instance)
|
e, instance=instance)
|
||||||
|
|
||||||
if not CONF.config_drive_cdrom:
|
if not CONF.hyperv.config_drive_cdrom:
|
||||||
drive_type = constants.IDE_DISK
|
drive_type = constants.IDE_DISK
|
||||||
configdrive_path = os.path.join(instance_path,
|
configdrive_path = os.path.join(instance_path,
|
||||||
'configdrive.vhd')
|
'configdrive.vhd')
|
||||||
utils.execute(CONF.qemu_img_cmd,
|
utils.execute(CONF.hyperv.qemu_img_cmd,
|
||||||
'convert',
|
'convert',
|
||||||
'-f',
|
'-f',
|
||||||
'raw',
|
'raw',
|
||||||
|
|||||||
@@ -33,20 +33,24 @@ from nova.virt.hyperv import volumeutilsv2
|
|||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
hyper_volumeops_opts = [
|
hyper_volumeops_opts = [
|
||||||
cfg.IntOpt('hyperv_attaching_volume_retry_count',
|
cfg.IntOpt('volume_attach_retry_count',
|
||||||
default=10,
|
default=10,
|
||||||
help='The number of times we retry on attaching volume '),
|
help='The number of times to retry to attach a volume',
|
||||||
cfg.IntOpt('hyperv_wait_between_attach_retry',
|
deprecated_name='hyperv_attaching_volume_retry_count',
|
||||||
|
deprecated_group='DEFAULT'),
|
||||||
|
cfg.IntOpt('volume_attach_retry_interval',
|
||||||
default=5,
|
default=5,
|
||||||
help='The seconds to wait between an volume '
|
help='Interval between volume attachment attempts, in seconds',
|
||||||
'attachment attempt'),
|
deprecated_name='hyperv_wait_between_attach_retry',
|
||||||
|
deprecated_group='DEFAULT'),
|
||||||
cfg.BoolOpt('force_volumeutils_v1',
|
cfg.BoolOpt('force_volumeutils_v1',
|
||||||
default=False,
|
default=False,
|
||||||
help='Force volumeutils v1'),
|
help='Force volumeutils v1',
|
||||||
|
deprecated_group='DEFAULT'),
|
||||||
]
|
]
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.register_opts(hyper_volumeops_opts)
|
CONF.register_opts(hyper_volumeops_opts, 'hyperv')
|
||||||
CONF.import_opt('my_ip', 'nova.netconf')
|
CONF.import_opt('my_ip', 'nova.netconf')
|
||||||
|
|
||||||
|
|
||||||
@@ -63,7 +67,7 @@ class VolumeOps(object):
|
|||||||
self._default_root_device = 'vda'
|
self._default_root_device = 'vda'
|
||||||
|
|
||||||
def _get_volume_utils(self):
|
def _get_volume_utils(self):
|
||||||
if(not CONF.force_volumeutils_v1 and
|
if(not CONF.hyperv.force_volumeutils_v1 and
|
||||||
self._hostutils.get_windows_version() >= 6.2):
|
self._hostutils.get_windows_version() >= 6.2):
|
||||||
return volumeutilsv2.VolumeUtilsV2()
|
return volumeutilsv2.VolumeUtilsV2()
|
||||||
else:
|
else:
|
||||||
@@ -171,12 +175,12 @@ class VolumeOps(object):
|
|||||||
LOG.debug(_('Device number: %(device_number)s, '
|
LOG.debug(_('Device number: %(device_number)s, '
|
||||||
'target lun: %(target_lun)s') % locals())
|
'target lun: %(target_lun)s') % locals())
|
||||||
#Finding Mounted disk drive
|
#Finding Mounted disk drive
|
||||||
for i in range(1, CONF.hyperv_attaching_volume_retry_count):
|
for i in range(1, CONF.hyperv.volume_attach_retry_count):
|
||||||
mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
|
mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
|
||||||
device_number)
|
device_number)
|
||||||
if mounted_disk_path:
|
if mounted_disk_path:
|
||||||
break
|
break
|
||||||
time.sleep(CONF.hyperv_wait_between_attach_retry)
|
time.sleep(CONF.hyperv.volume_attach_retry_interval)
|
||||||
|
|
||||||
if not mounted_disk_path:
|
if not mounted_disk_path:
|
||||||
raise vmutils.HyperVException(_('Unable to find a mounted disk '
|
raise vmutils.HyperVException(_('Unable to find a mounted disk '
|
||||||
|
|||||||
@@ -66,8 +66,10 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
|
|||||||
self.execute('iscsicli.exe ' + 'LisTargets')
|
self.execute('iscsicli.exe ' + 'LisTargets')
|
||||||
#Sending login
|
#Sending login
|
||||||
self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn)
|
self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn)
|
||||||
#Waiting the disk to be mounted. Research this to avoid sleep
|
#Waiting the disk to be mounted.
|
||||||
time.sleep(CONF.hyperv_wait_between_attach_retry)
|
#TODO(pnavarro): Check for the operation to end instead of
|
||||||
|
#relying on a timeout
|
||||||
|
time.sleep(CONF.hyperv.volume_attach_retry_interval)
|
||||||
|
|
||||||
def logout_storage_target(self, target_iqn):
|
def logout_storage_target(self, target_iqn):
|
||||||
"""Logs out storage target through its session id."""
|
"""Logs out storage target through its session id."""
|
||||||
|
|||||||
@@ -56,8 +56,10 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
|
|||||||
target = self._conn_storage.MSFT_iSCSITarget
|
target = self._conn_storage.MSFT_iSCSITarget
|
||||||
target.Connect(NodeAddress=target_iqn,
|
target.Connect(NodeAddress=target_iqn,
|
||||||
IsPersistent=True)
|
IsPersistent=True)
|
||||||
#Waiting the disk to be mounted. Research this
|
#Waiting the disk to be mounted.
|
||||||
time.sleep(CONF.hyperv_wait_between_attach_retry)
|
#TODO(pnavarro): Check for the operation to end instead of
|
||||||
|
#relying on a timeout
|
||||||
|
time.sleep(CONF.hyperv.volume_attach_retry_interval)
|
||||||
|
|
||||||
def logout_storage_target(self, target_iqn):
|
def logout_storage_target(self, target_iqn):
|
||||||
"""Logs out storage target through its session id."""
|
"""Logs out storage target through its session id."""
|
||||||
|
|||||||
Reference in New Issue
Block a user