VMware: Multiple cluster support using single compute service
Allow a single VC driver to model multiple clusters in vCenter
as multiple nova-compute nodes. The VC driver is configured
to represent a set of clusters as compute nodes.

For example, to specify two clusters named cluster-A and
cluster-B, nova.conf should contain:

cluster_name = cluster-A
cluster_name = cluster-B
DocImpact
Change-Id: Ia5464948cc30b9b744450f9c301c4f3afaff717b
Blueprint: multiple-clusters-managed-by-one-service
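
The repeated cluster_name keys work because the option becomes an
oslo.config MultiStrOpt (see the driver.py hunk below), which collects
every occurrence of the key into a Python list. A minimal standalone
sketch of that behavior, illustrative rather than part of the commit:

    from oslo.config import cfg  # the oslo.config package of this era

    CONF = cfg.CONF
    CONF.register_opts([cfg.MultiStrOpt('cluster_name')], group='vmware')

    # with a config file whose [vmware] section repeats the key:
    #   cluster_name = cluster-A
    #   cluster_name = cluster-B
    CONF(['--config-file', 'nova.conf'])
    print(CONF.vmware.cluster_name)  # ['cluster-A', 'cluster-B']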
@@ -3228,7 +3228,7 @@
 #host_password=<None>
 
 # Name of a VMware Cluster ComputeResource. Used only if
-# compute_driver is vmwareapi.VMwareVCDriver. (string value)
+# compute_driver is vmwareapi.VMwareVCDriver. (multi valued)
 #cluster_name=<None>
 
 # Regex to match the name of a datastore. Used only if
@@ -35,7 +35,9 @@ class ConfigDriveTestCase(test.TestCase):
     def setUp(self):
         super(ConfigDriveTestCase, self).setUp()
         self.context = context.RequestContext('fake', 'fake', is_admin=False)
-        self.flags(host_ip='test_url',
+        cluster_name = 'test_cluster'
+        self.flags(cluster_name=[cluster_name],
+                   host_ip='test_url',
                    host_username='test_username',
                    host_password='test_pass',
                    use_linked_clone=False, group='vmware')
@@ -50,6 +52,8 @@ class ConfigDriveTestCase(test.TestCase):
             'disk_format': 'vhd',
             'size': 512,
         }
+        self.node_name = '%s(%s)' % (self.conn.dict_mors.keys()[0],
+                                     cluster_name)
         self.test_instance = {'node': 'test_url',
                               'vm_state': 'building',
                               'project_id': 'fake',
@@ -70,7 +74,8 @@ class ConfigDriveTestCase(test.TestCase):
                               'scheduling',
                               'reservation_id': 'r-3t8muvr0',
                               'id': 1,
-                              'uuid': 'fake-uuid'}
+                              'uuid': 'fake-uuid',
+                              'node': self.node_name}
 
 
 class FakeInstanceMetadata(object):
     def __init__(self, instance, content=None, extra_md=None):
@@ -121,7 +121,7 @@ class VMwareAPIVMTestCase(test.TestCase):
         vmwareapi_fake.reset()
         db_fakes.stub_out_db_instance_api(self.stubs)
         stubs.set_stubs(self.stubs)
-        self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
+        self.conn = driver.VMwareESXDriver(fake.FakeVirtAPI)
         # NOTE(vish): none of the network plugging code is actually
         # being tested
         self.network_info = utils.get_test_network_info()
@@ -138,7 +138,9 @@ class VMwareAPIVMTestCase(test.TestCase):
         vmwareapi_fake.cleanup()
         nova.tests.image.fake.FakeImageService_reset()
 
-    def _create_instance_in_the_db(self):
+    def _create_instance_in_the_db(self, node=None):
+        if not node:
+            node = self.node_name
         values = {'name': '1',
                   'id': 1,
                   'uuid': "fake-uuid",
@@ -149,27 +151,29 @@ class VMwareAPIVMTestCase(test.TestCase):
                   'ramdisk_id': "1",
                   'mac_address': "de:ad:be:ef:be:ef",
                   'instance_type': 'm1.large',
-                  'node': self.node_name,
+                  'node': node,
                   }
         self.instance = db.instance_create(None, values)
 
-    def _create_vm(self):
+    def _create_vm(self, node=None, num_instances=1):
         """Create and spawn the VM."""
-        self._create_instance_in_the_db()
+        if not node:
+            node = self.node_name
+        self._create_instance_in_the_db(node=node)
         self.type_data = db.flavor_get_by_name(None, 'm1.large')
         self.conn.spawn(self.context, self.instance, self.image,
                         injected_files=[], admin_password=None,
                         network_info=self.network_info,
                         block_device_info=None)
-        self._check_vm_record()
+        self._check_vm_record(num_instances=num_instances)
 
-    def _check_vm_record(self):
+    def _check_vm_record(self, num_instances=1):
         """
         Check if the spawned VM's properties correspond to the instance in
         the db.
         """
         instances = self.conn.list_instances()
-        self.assertEquals(len(instances), 1)
+        self.assertEquals(len(instances), num_instances)
 
         # Get Nova record for VM
         vm_info = self.conn.get_info({'uuid': 'fake-uuid',
@@ -473,9 +477,7 @@ class VMwareAPIVMTestCase(test.TestCase):
 
     def _test_finish_migration(self, power_on):
         """
-        Tests the finish_migration method on vmops via the
-        VMwareVCDriver. Results are checked against whether or not
-        the underlying instance should have been powered on.
+        Tests the finish_migration method on vmops
         """
 
         self.power_on_called = False
@@ -504,22 +506,21 @@ class VMwareAPIVMTestCase(test.TestCase):
                                    disk_info=None,
                                    network_info=None,
                                    block_device_info=None,
+                                   resize_instance=False,
                                    image_meta=None,
                                    power_on=power_on)
-        # verify the results
-        self.assertEquals(power_on, self.power_on_called)
 
     def test_finish_migration_power_on(self):
-        self._test_finish_migration(power_on=True)
+        self.assertRaises(NotImplementedError,
+                          self._test_finish_migration, power_on=True)
 
     def test_finish_migration_power_off(self):
-        self._test_finish_migration(power_on=False)
+        self.assertRaises(NotImplementedError,
+                          self._test_finish_migration, power_on=False)
 
     def _test_finish_revert_migration(self, power_on):
         """
-        Tests the finish_revert_migration method on vmops via the
-        VMwareVCDriver. Results are checked against whether or not
-        the underlying instance should have been powered on.
+        Tests the finish_revert_migration method on vmops
         """
 
         # setup the test instance in the database
@@ -564,14 +565,14 @@ class VMwareAPIVMTestCase(test.TestCase):
         self.conn.finish_revert_migration(instance=self.instance,
                                           network_info=None,
                                           power_on=power_on)
-        # verify the results
-        self.assertEquals(power_on, self.power_on_called)
 
     def test_finish_revert_migration_power_on(self):
-        self._test_finish_revert_migration(power_on=True)
+        self.assertRaises(NotImplementedError,
+                          self._test_finish_migration, power_on=True)
 
     def test_finish_revert_migration_power_off(self):
-        self._test_finish_revert_migration(power_on=False)
+        self.assertRaises(NotImplementedError,
+                          self._test_finish_migration, power_on=False)
 
     def test_diagnostics_non_existent_vm(self):
         self._create_instance_in_the_db()
@@ -596,7 +597,7 @@ class VMwareAPIVMTestCase(test.TestCase):
         fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0]
         fake_vm_id = int(fake_vm.obj.value.replace('vm-', ''))
         vnc_dict = self.conn.get_vnc_console(self.instance)
-        self.assertEquals(vnc_dict['host'], "ha-host")
+        self.assertEquals(vnc_dict['host'], 'test_url')
         self.assertEquals(vnc_dict['port'], cfg.CONF.vmware.vnc_port +
                           fake_vm_id % cfg.CONF.vmware.vnc_port_total)
 
@@ -804,10 +805,18 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
 
     def setUp(self):
         super(VMwareAPIVCDriverTestCase, self).setUp()
-        self.flags(cluster_name='test_cluster',
+        cluster_name = 'test_cluster'
+        cluster_name2 = 'test_cluster2'
+        self.flags(cluster_name=[cluster_name, cluster_name2],
                    task_poll_interval=10, datastore_regex='.*', group='vmware')
         self.flags(vnc_enabled=False)
         self.conn = driver.VMwareVCDriver(None, False)
+        node = self.conn._resources.keys()[0]
+        self.node_name = '%s(%s)' % (node,
+                                     self.conn._resources[node]['name'])
+        node = self.conn._resources.keys()[1]
+        self.node_name2 = '%s(%s)' % (node,
+                                      self.conn._resources[node]['name'])
 
     def tearDown(self):
         super(VMwareAPIVCDriverTestCase, self).tearDown()
@@ -822,13 +831,51 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
         self.assertEquals(stats['memory_mb_used'], 1024 - 524)
         self.assertEquals(stats['hypervisor_type'], 'VMware ESXi')
         self.assertEquals(stats['hypervisor_version'], '5.0.0')
-        self.assertEquals(stats['hypervisor_hostname'], 'test_url')
+        self.assertEquals(stats['hypervisor_hostname'], self.node_name)
         self.assertEquals(stats['supported_instances'],
                 '[["i686", "vmware", "hvm"], ["x86_64", "vmware", "hvm"]]')
 
     def test_invalid_datastore_regex(self):
         # Tests if we raise an exception for Invalid Regular Expression in
         # vmware_datastore_regex
-        self.flags(cluster_name='test_cluster', datastore_regex='fake-ds(01',
+        self.flags(cluster_name=['test_cluster'], datastore_regex='fake-ds(01',
                    group='vmware')
         self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None)
 
+    def test_get_available_nodes(self):
+        nodelist = self.conn.get_available_nodes()
+        self.assertEquals(nodelist, [self.node_name, self.node_name2])
+
+    def test_spawn_multiple_node(self):
+        self._create_vm(node=self.node_name, num_instances=1)
+        info = self.conn.get_info({'uuid': 'fake-uuid'})
+        self._check_vm_info(info, power_state.RUNNING)
+        self._create_vm(node=self.node_name2, num_instances=2)
+        info = self.conn.get_info({'uuid': 'fake-uuid'})
+        self._check_vm_info(info, power_state.RUNNING)
+
+    def test_finish_migration_power_on(self):
+        self._test_finish_migration(power_on=True)
+        self.assertEquals(True, self.power_on_called)
+
+    def test_finish_migration_power_off(self):
+        self._test_finish_migration(power_on=False)
+        self.assertEquals(False, self.power_on_called)
+
+    def test_finish_revert_migration_power_on(self):
+        self._test_finish_revert_migration(power_on=True)
+        self.assertEquals(True, self.power_on_called)
+
+    def test_finish_revert_migration_power_off(self):
+        self._test_finish_revert_migration(power_on=False)
+        self.assertEquals(False, self.power_on_called)
+
+    def test_get_vnc_console(self):
+        self._create_instance_in_the_db()
+        self._create_vm()
+        fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0]
+        fake_vm_id = int(fake_vm.obj.value.replace('vm-', ''))
+        vnc_dict = self.conn.get_vnc_console(self.instance)
+        self.assertEquals(vnc_dict['host'], "ha-host")
+        self.assertEquals(vnc_dict['port'], cfg.CONF.vmware.vnc_port +
+                          fake_vm_id % cfg.CONF.vmware.vnc_port_total)
@@ -64,12 +64,11 @@ vmwareapi_opts = [
                     'Used only if compute_driver is '
                     'vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.',
                secret=True),
-    cfg.StrOpt('cluster_name',
+    cfg.MultiStrOpt('cluster_name',
                deprecated_name='vmwareapi_cluster_name',
                deprecated_group='DEFAULT',
-               help='Name of a VMware Cluster ComputeResource. '
-                    'Used only if compute_driver is '
-                    'vmwareapi.VMwareVCDriver.'),
+               help='Name of a VMware Cluster ComputeResource. Used only if '
+                    'compute_driver is vmwareapi.VMwareVCDriver.'),
     cfg.StrOpt('datastore_regex',
                help='Regex to match the name of a datastore. '
                     'Used only if compute_driver is '
@@ -302,19 +301,8 @@ class VMwareESXDriver(driver.ComputeDriver):
                 'username': CONF.vmware.host_username,
                 'password': CONF.vmware.host_password}
 
-    def get_available_resource(self, nodename):
-        """Retrieve resource information.
-
-        This method is called when nova-compute launches, and
-        as part of a periodic task that records the results in the DB.
-
-        :returns: dictionary describing resources
-
-        """
-        host_stats = self.get_host_stats(refresh=True)
-
-        # Updating host information
-        dic = {'vcpus': host_stats["vcpus"],
+    def _get_available_resources(self, host_stats):
+        return {'vcpus': host_stats['vcpus'],
                'memory_mb': host_stats['host_memory_total'],
                'local_gb': host_stats['disk_total'],
                'vcpus_used': 0,
@@ -329,7 +317,19 @@ class VMwareESXDriver(driver.ComputeDriver):
                    host_stats['supported_instances']),
                }
 
-        return dic
+    def get_available_resource(self, nodename):
+        """Retrieve resource information.
+
+        This method is called when nova-compute launches, and
+        as part of a periodic task that records the results in the DB.
+
+        :returns: dictionary describing resources
+
+        """
+        host_stats = self.get_host_stats(refresh=True)
+
+        # Updating host information
+        return self._get_available_resources(host_stats)
 
     def update_host_status(self):
         """Update the status info of the host, and return those values
@@ -384,15 +384,24 @@ class VMwareVCDriver(VMwareESXDriver):
 
     def __init__(self, virtapi, read_only=False, scheme="https"):
         super(VMwareVCDriver, self).__init__(virtapi)
-        self._cluster_name = CONF.vmware.cluster_name
-        if not self._cluster_name:
-            self._cluster = None
-        else:
-            self._cluster = vm_util.get_cluster_ref_from_name(
-                            self._session, self._cluster_name)
-            if self._cluster is None:
-                raise exception.NotFound(_("VMware Cluster %s is not found")
-                                           % self._cluster_name)
+
+        # Get the list of clusters to be used
+        self._cluster_names = CONF.vmware.cluster_name
+        self.dict_mors = vm_util.get_all_cluster_refs_by_name(self._session,
+                                                          self._cluster_names)
+        if not self.dict_mors:
+            raise exception.NotFound(_("All clusters specified %s were not"
+                                       " found in the vCenter")
+                                     % self._cluster_names)
+
+        # Check if there are any clusters that were specified in the nova.conf
+        # but are not in the vCenter, for missing clusters log a warning.
+        clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()]
+        missing_clusters = set(self._cluster_names) - set(clusters_found)
+        if missing_clusters:
+            LOG.warn(_("The following clusters could not be found in the"
+                       " vCenter %s") % list(missing_clusters))
+
         self._datastore_regex = None
         if CONF.vmware.datastore_regex:
             try:
@@ -401,21 +410,18 @@ class VMwareVCDriver(VMwareESXDriver):
                 raise exception.InvalidInput(reason=
                     _("Invalid Regular Expression %s")
                     % CONF.vmware.datastore_regex)
-        self._volumeops = volumeops.VMwareVolumeOps(self._session,
-                                                    cluster=self._cluster,
-                                                    vc_support=True)
-        self._vmops = vmops.VMwareVMOps(self._session, self.virtapi,
-                                        self._volumeops, self._cluster,
-                                        self._datastore_regex)
-        self._vc_state = None
 
-    @property
-    def host_state(self):
-        if not self._vc_state:
-            self._vc_state = host.VCState(self._session,
-                                          self._host_ip,
-                                          self._cluster)
-        return self._vc_state
+        # The _resources is used to maintain the vmops, volumeops and vcstate
+        # objects per cluster
+        self._resources = {}
+        self._virtapi = virtapi
+        self._update_resources()
+
+        # The following initialization is necessary since the base class does
+        # not use VC state.
+        first_cluster = self._resources.keys()[0]
+        self._vmops = self._resources.get(first_cluster).get('vmops')
+        self._volumeops = self._resources.get(first_cluster).get('volumeops')
+        self._vc_state = self._resources.get(first_cluster).get('vcstate')
 
     def migrate_disk_and_power_off(self, context, instance, dest,
                                    instance_type, network_info,
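
Worth noting on the design: _resources keys each cluster or resource
pool by its managed object ID, and the first entry seeds _vmops,
_volumeops and _vc_state so code paths inherited from the ESX base
class keep working. A toy sketch of that default-selection pattern
(placeholder values, illustrative only, not code from the commit):

    resources = {
        'domain-1000': {'vmops': 'vmops-A', 'volumeops': 'volumeops-A',
                        'vcstate': 'vcstate-A', 'name': 'cluster-A'},
        'domain-1001': {'vmops': 'vmops-B', 'volumeops': 'volumeops-B',
                        'vcstate': 'vcstate-B', 'name': 'cluster-B'},
    }

    # the driver's keys()[0] works directly on Python 2, which Nova targeted
    first_cluster = list(resources.keys())[0]
    default_vmops = resources.get(first_cluster).get('vmops')
    # per-instance routing instead parses the mo-id out of instance['node']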
@@ -461,6 +467,161 @@ class VMwareVCDriver(VMwareESXDriver):
         # itself. You must talk to the VNC host underneath vCenter.
         return self._vmops.get_vnc_console_vcenter(instance)
 
+    def _update_resources(self):
+        """This method creates a dictionary of VMOps, VolumeOps and VCState.
+
+        The VMwareVMOps, VMwareVolumeOps and VCState object is for each
+        cluster/rp. The dictionary is of the form
+        {
+            domain-1000 : {'vmops': vmops_obj,
+                           'volumeops': volumeops_obj,
+                           'vcstate': vcstate_obj,
+                           'name': MyCluster},
+            resgroup-1000 : {'vmops': vmops_obj,
+                             'volumeops': volumeops_obj,
+                             'vcstate': vcstate_obj,
+                             'name': MyRP},
+        }
+        """
+        # TODO(kirankv) we can avoid creating multiple vmops and volumeops
+        # if we make them utility class so that cluster is passed as a
+        # parameter to the method
+        added_nodes = set(self.dict_mors.keys()) - set(self._resources.keys())
+        for node in added_nodes:
+            _volumeops = volumeops.VMwareVolumeOps(self._session,
+                                        self.dict_mors[node]['cluster_mor'],
+                                        vc_support=True)
+            _vmops = vmops.VMwareVMOps(self._session, self._virtapi,
+                                       _volumeops,
+                                       self.dict_mors[node]['cluster_mor'])
+            name = self.dict_mors.get(node)['name']
+            _vc_state = host.VCState(self._session,
+                                     self._create_nodename(node, name),
+                                     self.dict_mors.get(node)['cluster_mor'])
+            self._resources[node] = {'vmops': _vmops,
+                                     'volumeops': _volumeops,
+                                     'vcstate': _vc_state,
+                                     'name': name,
+                                     }
+        deleted_nodes = (set(self._resources.keys()) -
+                         set(self.dict_mors.keys()))
+        for node in deleted_nodes:
+            LOG.debug(_("Removing node %s since its removed from"
+                        " nova.conf") % node)
+            del self._resources[node]
+
+    def _create_nodename(self, mo_id, display_name):
+        """Creates the name that is stored in hypervisor_hostname column.
+
+        The name will be of the form similar to
+        domain-1000(MyCluster)
+        resgroup-1000(MyResourcePool)
+        """
+        return mo_id + '(' + display_name + ')'
+
+    def _get_mo_id(self, nodename):
+        return nodename.partition('(')[0]
+
+    def _get_vmops_for_compute_node(self, nodename):
+        """Retrieve vmops object from mo_id stored in the node name.
+
+        Node name is of the form domain-1000(MyCluster)
+        """
+        return self._resources.get(self._get_mo_id(nodename)).get('vmops')
+
+    def _get_volumeops_for_compute_node(self, nodename):
+        """Retrieve vmops object from mo_id stored in the node name.
+
+        Node name is of the form domain-1000(MyCluster)
+        """
+        return self._resources.get(self._get_mo_id(nodename)).get('volumeops')
+
+    def _get_vc_state_for_compute_node(self, nodename):
+        """Retrieve VCState object from mo_id stored in the node name.
+
+        Node name is of the form domain-1000(MyCluster)
+        """
+        return self._resources.get(self._get_mo_id(nodename)).get('vcstate')
+
+    def get_available_resource(self, nodename):
+        """Retrieve resource info.
+
+        This method is called when nova-compute launches, and
+        as part of a periodic task.
+
+        :returns: dictionary describing resources
+
+        """
+        stats_dict = {}
+        vc_state = self._get_vc_state_for_compute_node(nodename)
+        if vc_state:
+            host_stats = vc_state.get_host_stats(refresh=True)
+
+            # Updating host information
+            stats_dict = self._get_available_resources(host_stats)
+        else:
+            LOG.info(_("Invalid cluster or resource pool"
+                       " name : %s") % nodename)
+        return stats_dict
+
+    def get_available_nodes(self):
+        """Returns nodenames of all nodes managed by the compute service.
+
+        This method is for multi compute-nodes support. If a driver supports
+        multi compute-nodes, this method returns a list of nodenames managed
+        by the service. Otherwise, this method should return
+        [hypervisor_hostname].
+        """
+        self.dict_mors = vm_util.get_all_cluster_refs_by_name(
+                                self._session,
+                                CONF.vmware.cluster_name)
+        nodes = self.dict_mors.keys()
+        node_list = []
+        self._update_resources()
+        for node in self.dict_mors.keys():
+            nodename = self._create_nodename(node,
+                                             self.dict_mors.get(node)['name'])
+            node_list.append(nodename)
+        LOG.debug(_("The available nodes are: %s") % node_list)
+        return node_list
+
+    def get_host_stats(self, refresh=True):
+        """Return currently known host stats."""
+        stats_list = []
+        nodes = self.get_available_nodes()
+        for node in nodes:
+            stats_list.append(self.get_available_resource(node))
+        return stats_list
+
+    def spawn(self, context, instance, image_meta, injected_files,
+              admin_password, network_info=None, block_device_info=None):
+        """Create VM instance."""
+        _vmops = self._get_vmops_for_compute_node(instance['node'])
+        _vmops.spawn(context, instance, image_meta, injected_files,
+                     admin_password, network_info, block_device_info)
+
+    def attach_volume(self, connection_info, instance, mountpoint):
+        """Attach volume storage to VM instance."""
+        _volumeops = self._get_volumeops_for_compute_node(instance['node'])
+        return _volumeops.attach_volume(connection_info,
+                                        instance,
+                                        mountpoint)
+
+    def detach_volume(self, connection_info, instance, mountpoint):
+        """Detach volume storage to VM instance."""
+        _volumeops = self._get_volumeops_for_compute_node(instance['node'])
+        return _volumeops.detach_volume(connection_info,
+                                        instance,
+                                        mountpoint)
+
+    def get_volume_connector(self, instance):
+        """Return volume connector information."""
+        _volumeops = self._get_volumeops_for_compute_node(instance['node'])
+        return _volumeops.get_volume_connector(instance)
+
 
 class VMwareAPISession(object):
     """
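
The nodename format added above is the routing key for the whole
feature: a node is published as its managed object ID with the display
name appended in parentheses, and the mo-id is recovered with
str.partition. A standalone sketch of the round trip (free functions
mirroring the private _create_nodename/_get_mo_id methods; illustrative
only, not code from the commit):

    def create_nodename(mo_id, display_name):
        # stored in the hypervisor_hostname column, e.g. "domain-1000(MyCluster)"
        return mo_id + '(' + display_name + ')'

    def get_mo_id(nodename):
        # partition('(') splits at the first '('; element [0] is the mo-id
        return nodename.partition('(')[0]

    assert create_nodename('domain-1000', 'MyCluster') == 'domain-1000(MyCluster)'
    assert get_mo_id('domain-1000(MyCluster)') == 'domain-1000'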
@@ -63,7 +63,8 @@ def reset():
     create_datacenter()
     create_datastore()
     create_res_pool()
-    create_cluster()
+    create_cluster('test_cluster')
+    create_cluster('test_cluster2')
 
 
 def cleanup():
@@ -350,9 +351,50 @@ class Network(ManagedObject):
 class ResourcePool(ManagedObject):
     """Resource Pool class."""
 
-    def __init__(self, name="test-rpool"):
+    def __init__(self, name="test-rpool", value="resgroup-test"):
         super(ResourcePool, self).__init__("rp")
         self.set("name", name)
+        self.set("name", "test_ResPool")
+        summary = DataObject()
+        runtime = DataObject()
+        config = DataObject()
+        memory = DataObject()
+        cpu = DataObject()
+
+        memoryAllocation = DataObject()
+        cpuAllocation = DataObject()
+
+        memory.maxUsage = 1000 * 1024 * 1024
+        memory.overallUsage = 500 * 1024 * 1024
+        cpu.maxUsage = 10000
+        cpu.overallUsage = 1000
+        runtime.cpu = cpu
+        runtime.memory = memory
+        summary.runtime = runtime
+        cpuAllocation.limit = 10000
+        memoryAllocation.limit = 1024
+        memoryAllocation.reservation = 1024
+        config.memoryAllocation = memoryAllocation
+        config.cpuAllocation = cpuAllocation
+        self.set("summary", summary)
+        self.set("config", config)
+        parent = ManagedObjectReference(value=value,
+                                        name=name)
+        owner = ManagedObjectReference(value=value,
+                                       name=name)
+        self.set("parent", parent)
+        self.set("owner", owner)
+
+
+class DatastoreHostMount(DataObject):
+    def __init__(self, value='host-100'):
+        super(DatastoreHostMount, self).__init__()
+        host_ref = (_db_content["HostSystem"]
+                    [_db_content["HostSystem"].keys()[0]].obj)
+        host_system = DataObject()
+        host_system.ManagedObjectReference = [host_ref]
+        host_system.value = value
+        self.key = host_system
+
+
 class ClusterComputeResource(ManagedObject):
@@ -373,6 +415,7 @@ class ClusterComputeResource(ManagedObject):
         summary.totalMemory = 0
         summary.effectiveMemory = 0
         self.set("summary", summary)
+        self.set("summary.effectiveCpu", 10000)
 
     def _add_resource_pool(self, r_pool):
         if r_pool:
@@ -477,6 +520,7 @@ class HostSystem(ManagedObject):
         hardware.numCpuThreads = 16
         hardware.vendor = "Intel"
         hardware.cpuModel = "Intel(R) Xeon(R)"
+        hardware.uuid = "host-uuid"
         hardware.memorySize = 1024 * 1024 * 1024
         summary.hardware = hardware
@@ -497,7 +541,9 @@ class HostSystem(ManagedObject):
         net_info_pnic.PhysicalNic = [pnic_do]
 
         self.set("summary", summary)
-        self.set("summary.hardware", hardware)
+        self.set("capability.maxHostSupportedVcpus", 600)
+        self.set("summary.runtime.inMaintenanceMode", False)
+        self.set("runtime.connectionState", "connected")
         self.set("config.network.pnic", net_info_pnic)
         self.set("connected", connected)
 
@@ -623,10 +669,11 @@ def create_network():
     _create_object('Network', network)
 
 
-def create_cluster():
-    cluster = ClusterComputeResource()
+def create_cluster(name):
+    cluster = ClusterComputeResource(name=name)
     cluster._add_host(_get_object_refs("HostSystem")[0])
     cluster._add_datastore(_get_object_refs("Datastore")[0])
+    cluster._add_resource_pool(_get_object_refs("ResourcePool")[0])
     _create_object('ClusterComputeResource', cluster)
 
@@ -1,5 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
 # Copyright (c) 2012 VMware, Inc.
 # Copyright (c) 2011 Citrix Systems, Inc.
 # Copyright 2011 OpenStack Foundation
@@ -23,9 +24,13 @@ import copy
 
 from nova import exception
 from nova.openstack.common.gettextutils import _
+from nova.openstack.common import log as logging
 from nova.virt.vmwareapi import vim_util
 
 
+LOG = logging.getLogger(__name__)
+
+
 def build_datastore_path(datastore_name, path):
     """Build the datastore compliant path."""
     return "[%s] %s" % (datastore_name, path)
@@ -882,3 +887,128 @@ def get_vmdk_volume_disk(hardware_devices):
     for device in hardware_devices:
         if (device.__class__.__name__ == "VirtualDisk"):
             return device
+
+
+def get_res_pool_ref(session, cluster, node_mo_id):
+    """Get the resource pool."""
+    if cluster is None:
+        # With no cluster named, use the root resource pool.
+        results = session._call_method(vim_util, "get_objects",
+                                       "ResourcePool")
+        _cancel_retrieve_if_necessary(session, results)
+        # The 0th resource pool is always the root resource pool on both ESX
+        # and vCenter.
+        res_pool_ref = results.objects[0].obj
+    else:
+        if cluster.value == node_mo_id:
+            # Get the root resource pool of the cluster
+            res_pool_ref = session._call_method(vim_util,
+                                                "get_dynamic_property",
+                                                cluster,
+                                                "ClusterComputeResource",
+                                                "resourcePool")
+
+    return res_pool_ref
+
+
+def get_all_cluster_mors(session):
+    """Get all the clusters in the vCenter."""
+    try:
+        results = session._call_method(vim_util, "get_objects",
+                                       "ClusterComputeResource", ["name"])
+        _cancel_retrieve_if_necessary(session, results)
+        return results.objects
+
+    except Exception as excep:
+        LOG.warn(_("Failed to get cluster references %s") % excep)
+
+
+def get_all_res_pool_mors(session):
+    """Get all the resource pools in the vCenter."""
+    try:
+        results = session._call_method(vim_util, "get_objects",
+                                       "ResourcePool")
+
+        _cancel_retrieve_if_necessary(session, results)
+        return results.objects
+    except Exception as excep:
+        LOG.warn(_("Failed to get resource pool references " "%s") % excep)
+
+
+def get_dynamic_property_mor(session, mor_ref, attribute):
+    """Get the value of an attribute for a given managed object."""
+    return session._call_method(vim_util, "get_dynamic_property",
+                                mor_ref, mor_ref._type, attribute)
+
+
+def find_entity_mor(entity_list, entity_name):
+    """Returns managed object ref for given cluster or resource pool name."""
+    return [mor for mor in entity_list if mor.propSet[0].val == entity_name]
+
+
+def get_all_cluster_refs_by_name(session, path_list):
+    """Get reference to the Cluster, ResourcePool with the path specified.
+
+    The path is the display name. This can be the full path as well.
+    The input will have the list of clusters and resource pool names
+    """
+    cls = get_all_cluster_mors(session)
+    res = get_all_res_pool_mors(session)
+    path_list = [path.strip() for path in path_list]
+    list_obj = []
+    for entity_path in path_list:
+        # entity_path could be unique cluster and/or resource-pool name
+        res_mor = find_entity_mor(res, entity_path)
+        cls_mor = find_entity_mor(cls, entity_path)
+        cls_mor.extend(res_mor)
+        for mor in cls_mor:
+            list_obj.append((mor.obj, mor.propSet[0].val))
+    return get_dict_mor(session, list_obj)
+
+
+def get_dict_mor(session, list_obj):
+    """The input is a list of objects in the form
+    (manage_object,display_name)
+    The managed object will be in the form
+    { value = "domain-1002", _type = "ClusterComputeResource" }
+
+    Output data format:
+    dict_mors = {
+                  'respool-1001': { 'cluster_mor': clusterMor,
+                                    'res_pool_mor': resourcePoolMor,
+                                    'name': display_name },
+                  'domain-1002': { 'cluster_mor': clusterMor,
+                                   'res_pool_mor': resourcePoolMor,
+                                   'name': display_name },
+                }
+    """
+    dict_mors = {}
+    for obj_ref, path in list_obj:
+        if obj_ref._type == "ResourcePool":
+            # Get owner cluster-ref mor
+            cluster_ref = get_dynamic_property_mor(session, obj_ref, "owner")
+            dict_mors[obj_ref.value] = {'cluster_mor': cluster_ref,
+                                        'res_pool_mor': obj_ref,
+                                        'name': path,
+                                        }
+        else:
+            # Get default resource pool of the cluster
+            res_pool_ref = get_dynamic_property_mor(session,
+                                                    obj_ref, "resourcePool")
+            dict_mors[obj_ref.value] = {'cluster_mor': obj_ref,
+                                        'res_pool_mor': res_pool_ref,
+                                        'name': path,
+                                        }
+    return dict_mors
+
+
+def get_mo_id_from_instance(instance):
+    """Return the managed object ID from the instance.
+
+    The instance['node'] will have the hypervisor_hostname field of the
+    compute node on which the instance exists or will be provisioned.
+    This will be of the form
+    'respool-1001(MyResPoolName)'
+    'domain-1001(MyClusterName)'
+    """
+    return instance['node'].partition('(')[0]
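
To make the get_dict_mor() output format concrete, here is a small walk
over a hypothetical dict_mors (placeholder strings stand in for real
managed object refs) that produces the nodenames get_available_nodes()
reports; illustrative only, not code from the commit:

    dict_mors = {
        'domain-1002': {'cluster_mor': 'cluster-ref',
                        'res_pool_mor': 'root-res-pool-ref',
                        'name': 'MyCluster'},
        'respool-1001': {'cluster_mor': 'owner-cluster-ref',
                         'res_pool_mor': 'res-pool-ref',
                         'name': 'MyRP'},
    }

    node_list = sorted('%s(%s)' % (mo_id, info['name'])
                       for mo_id, info in dict_mors.items())
    print(node_list)  # ['domain-1002(MyCluster)', 'respool-1001(MyRP)']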
@@ -180,7 +180,9 @@ class VMwareVMOps(object):
                 disk_type, vif_model) = _get_image_properties()
 
         vm_folder_ref = self._get_vmfolder_ref()
-        res_pool_ref = self._get_res_pool_ref()
+        node_mo_id = vm_util.get_mo_id_from_instance(instance)
+        res_pool_ref = vm_util.get_res_pool_ref(self._session,
+                                                self._cluster, node_mo_id)
 
         def _get_vif_infos():
             vif_infos = []