Remove ESXDriver from Juno.

1. Removes the VMwareESXDriver code in nova/virt/vmwareapi/driver.py
by either deleting the redundant methods or moving them to
VMwareVCDriver.
2. Changes the test cases to use VMwareVCDriver and also removes
duplicate test cases which were previously testing both
VMwareESXDriver and VMwareVCDriver, keeping only tests for the latter.

DocImpact
Closes-Bug: #1346637
Change-Id: I718fc0ee67dbd625af00c20fa4e34b8a35015437
This commit is contained in:
Akash Gangil 2014-08-02 05:37:38 -07:00
parent 5cc11fcf64
commit 1deb31f85a
6 changed files with 108 additions and 452 deletions

View File

@ -48,7 +48,7 @@ def log_db_contents(msg=None):
{'text': msg or "", 'content': pprint.pformat(_db_content)})
def reset(vc=False):
def reset():
"""Resets the db contents."""
cleanup()
create_network()
@ -56,16 +56,13 @@ def reset(vc=False):
create_host_storage_system()
ds_ref1 = create_datastore('ds1', 1024, 500)
create_host(ds_ref=ds_ref1)
if vc:
ds_ref2 = create_datastore('ds2', 1024, 500)
create_host(ds_ref=ds_ref2)
ds_ref2 = create_datastore('ds2', 1024, 500)
create_host(ds_ref=ds_ref2)
create_datacenter('dc1', ds_ref1)
if vc:
create_datacenter('dc2', ds_ref2)
create_datacenter('dc2', ds_ref2)
create_res_pool()
if vc:
create_cluster('test_cluster', ds_ref1)
create_cluster('test_cluster2', ds_ref2)
create_cluster('test_cluster', ds_ref1)
create_cluster('test_cluster2', ds_ref2)
def cleanup():

View File

@ -47,7 +47,7 @@ class ConfigDriveTestCase(test.NoDBTestCase):
host_password='test_pass',
use_linked_clone=False, group='vmware')
self.flags(vnc_enabled=False)
vmwareapi_fake.reset(vc=True)
vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
nova.tests.image.fake.stub_out_image_service(self.stubs)
self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)

View File

@ -54,7 +54,6 @@ from nova.tests.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.virt.vmwareapi import stubs
from nova import utils as nova_utils
from nova.virt import driver as v_driver
from nova.virt import fake
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
@ -307,10 +306,12 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
super(VMwareAPIVMTestCase, self).setUp()
vm_util.vm_refs_cache_reset()
self.context = context.RequestContext('fake', 'fake', is_admin=False)
self.flags(host_ip='test_url',
cluster_name = 'test_cluster'
cluster_name2 = 'test_cluster2'
self.flags(cluster_name=[cluster_name, cluster_name2],
host_ip='test_url',
host_username='test_username',
host_password='test_pass',
datastore_regex='.*',
api_retry_count=1,
use_linked_clone=False, group='vmware')
self.flags(vnc_enabled=False,
@ -318,13 +319,17 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
my_ip='')
self.user_id = 'fake'
self.project_id = 'fake'
self.node_name = 'test_url'
self.ds = 'ds1'
self.context = context.RequestContext(self.user_id, self.project_id)
stubs.set_stubs(self.stubs)
vmwareapi_fake.reset()
nova.tests.image.fake.stub_out_image_service(self.stubs)
self.conn = driver.VMwareESXDriver(fake.FakeVirtAPI)
self.conn = driver.VMwareVCDriver(None, False)
self.node_name = self.conn._resources.keys()[0]
self.node_name2 = self.conn._resources.keys()[1]
if cluster_name2 in self.node_name2:
self.ds = 'ds1'
else:
self.ds = 'ds2'
self.vim = vmwareapi_fake.FakeVim()
# NOTE(vish): none of the network plugging code is actually
@ -341,7 +346,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
}
self.fake_image_uuid = self.image['id']
nova.tests.image.fake.stub_out_image_service(self.stubs)
self.vnc_host = 'test_url'
self.vnc_host = 'ha-host'
self._set_exception_vars()
self.instance_without_compute = {'node': None,
'vm_state': 'building',
@ -1166,12 +1171,6 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
network_info=self.network_info,
block_device_info=block_device_info)
def test_spawn_attach_volume_vmdk(self):
self._spawn_attach_volume_vmdk()
def test_spawn_attach_volume_vmdk_no_image_ref(self):
self._spawn_attach_volume_vmdk(set_image_ref=False)
def test_spawn_attach_volume_iscsi(self):
self._create_instance()
self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
@ -1663,15 +1662,6 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.test_vm_ref = None
self.test_device_name = None
def test_pause(self):
# Tests that the VMwareESXDriver does not implement the pause method.
self.assertRaises(NotImplementedError, self.conn.pause, instance=None)
def test_unpause(self):
# Tests that the VMwareESXDriver does not implement the unpause method.
self.assertRaises(NotImplementedError, self.conn.unpause,
instance=None)
def test_get_diagnostics(self):
self._create_vm()
expected = {'memoryReservation': 0, 'suspendInterval': 0,
@ -1723,12 +1713,6 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
image_meta=None,
power_on=power_on)
def test_confirm_migration(self):
self._create_vm()
self.assertRaises(NotImplementedError,
self.conn.confirm_migration, self.context,
self.instance, None)
def _test_finish_revert_migration(self, power_on):
self._create_vm()
# Ensure ESX driver throws an error
@ -1738,18 +1722,6 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
instance=self.instance,
network_info=None)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(power_on=True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(power_on=False)
def test_get_console_pool_info(self):
info = self.conn.get_console_pool_info("console_type")
self.assertEqual(info['address'], 'test_url')
self.assertEqual(info['username'], 'test_username')
self.assertEqual(info['password'], 'test_pass')
def test_get_vnc_console_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound,
@ -1777,9 +1749,6 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.context,
self.instance)
def test_host_ip_addr(self):
self.assertEqual(self.conn.get_host_ip_addr(), "test_url")
def test_get_volume_connector(self):
self._create_vm()
connector_dict = self.conn.get_volume_connector(self.instance)
@ -2084,65 +2053,6 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self._cached_files_exist()
class VMwareAPIHostTestCase(test.NoDBTestCase,
test_driver.DriverAPITestHelper):
"""Unit tests for Vmware API host calls."""
def setUp(self):
super(VMwareAPIHostTestCase, self).setUp()
self.flags(image_cache_subdirectory_name='vmware_base')
vm_util.vm_refs_cache_reset()
self.flags(host_ip='test_url',
host_username='test_username',
host_password='test_pass', group='vmware')
vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
self.conn = driver.VMwareESXDriver(False)
def tearDown(self):
super(VMwareAPIHostTestCase, self).tearDown()
vmwareapi_fake.cleanup()
def test_public_api_signatures(self):
self.assertPublicAPISignatures(self.conn)
def test_host_state(self):
stats = self.conn.get_host_stats()
self.assertEqual(stats['vcpus'], 16)
self.assertEqual(stats['disk_total'], 1024)
self.assertEqual(stats['disk_available'], 500)
self.assertEqual(stats['disk_used'], 1024 - 500)
self.assertEqual(stats['host_memory_total'], 1024)
self.assertEqual(stats['host_memory_free'], 1024 - 500)
self.assertEqual(stats['hypervisor_version'], 5000000)
supported_instances = [('i686', 'vmware', 'hvm'),
('x86_64', 'vmware', 'hvm')]
self.assertEqual(stats['supported_instances'], supported_instances)
def _test_host_action(self, method, action, expected=None):
result = method('host', action)
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action(self.conn.host_power_action, 'shutdown')
def test_host_startup(self):
self._test_host_action(self.conn.host_power_action, 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode, True)
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode, False)
def test_get_host_uptime(self):
result = self.conn.get_host_uptime('host')
self.assertEqual('Please refer to test_url for the uptime', result)
class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase,
test_driver.DriverAPITestHelper):
@ -2156,7 +2066,7 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase,
task_poll_interval=10, datastore_regex='.*', group='vmware')
self.flags(vnc_enabled=False,
image_cache_subdirectory_name='vmware_base')
vmwareapi_fake.reset(vc=True)
vmwareapi_fake.reset()
self.conn = driver.VMwareVCDriver(None, False)
self.node_name = self.conn._resources.keys()[0]
self.node_name2 = self.conn._resources.keys()[1]
@ -2201,36 +2111,6 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase,
vcdriver._session._create_session.side_effect = side_effect
return vcdriver
@mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
def test_init_host_and_cleanup_host(self, mock_init):
vcdriver = self._setup_mocks_for_session(mock_init)
vcdriver.init_host("foo")
vcdriver._session._create_session.assert_called_once_with()
vcdriver.cleanup_host("foo")
vcdriver._session.vim.client.service.Logout.assert_called_once_with(
mock.ANY)
@mock.patch('nova.virt.vmwareapi.driver.LOG')
@mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
def test_cleanup_host_with_no_login(self, mock_init, mock_logger):
vcdriver = self._setup_mocks_for_session(mock_init)
vcdriver.init_host("foo")
vcdriver._session._create_session.assert_called_once_with()
# Not logged in...
# observe that no exceptions were thrown
mock_sc = mock.Mock()
vcdriver._session.vim.retrieve_service_content.return_value = mock_sc
web_fault = suds.WebFault(mock.Mock(), mock.Mock())
vcdriver._session.vim.client.service.Logout.side_effect = web_fault
vcdriver.cleanup_host("foo")
# assert that the mock Logout method was never called
vcdriver._session.vim.client.service.Logout.assert_called_once_with(
mock.ANY)
mock_logger.debug.assert_called_once_with(mock.ANY)
def test_host_power_action(self):
self.assertRaises(NotImplementedError,
self.conn.host_power_action, 'host', 'action')

View File

@ -43,7 +43,7 @@ class VMwareVIMUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVIMUtilTestCase, self).setUp()
fake.reset(vc=True)
fake.reset()
self.vim = fake.FakeVim()
self.vim._login()

View File

@ -13,10 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`vmwareapi` -- Nova support for VMware ESX/vCenter through VMware API.
:mod:`vmwareapi` -- Nova support for VMware vCenter through VMware API.
"""
# NOTE(sdague) for nicer compute_driver specification
from nova.virt.vmwareapi import driver
# VMwareESXDriver is deprecated in Juno. This property definition
# allows those configurations to work which reference it while
# logging a deprecation warning
VMwareESXDriver = driver.VMwareESXDriver
VMwareVCDriver = driver.VMwareVCDriver

View File

@ -16,7 +16,7 @@
# under the License.
"""
A connection to the VMware ESX/vCenter platform.
A connection to the VMware vCenter platform.
"""
import re
@ -25,10 +25,9 @@ import time
from eventlet import event
from oslo.config import cfg
import suds
from nova import exception
from nova.i18n import _, _LC
from nova.i18n import _, _LC, _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
@ -47,19 +46,18 @@ LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('host_ip',
help='Hostname or IP address for connection to VMware ESX/VC '
help='Hostname or IP address for connection to VMware VC '
'host.'),
cfg.IntOpt('host_port',
default=443,
help='Port for connection to VMware ESX/VC host.'),
help='Port for connection to VMware VC host.'),
cfg.StrOpt('host_username',
help='Username for connection to VMware ESX/VC host.'),
help='Username for connection to VMware VC host.'),
cfg.StrOpt('host_password',
help='Password for connection to VMware ESX/VC host.',
help='Password for connection to VMware VC host.',
secret=True),
cfg.MultiStrOpt('cluster_name',
help='Name of a VMware Cluster ComputeResource. Used only if '
'compute_driver is vmwareapi.VMwareVCDriver.'),
help='Name of a VMware Cluster ComputeResource.'),
cfg.StrOpt('datastore_regex',
help='Regex to match the name of a datastore.'),
cfg.FloatOpt('task_poll_interval',
@ -86,38 +84,39 @@ CONF.register_opts(vmwareapi_opts, 'vmware')
TIME_BETWEEN_API_CALL_RETRIES = 1.0
# The following class was removed in the transition from Icehouse to
# Juno, but may still be referenced in configuration files. The
# following stub allows those configurations to work while logging a
# deprecation warning.
class VMwareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
def _do_deprecation_warning(self):
LOG.warn(_LW('The VMware ESX driver is now deprecated and has been '
'removed in the Juno release. The VC driver will remain '
'and continue to be supported.'))
def __init__(self, virtapi, read_only=False, scheme="https"):
self._do_deprecation_warning()
class VMwareVCDriver(driver.ComputeDriver):
"""The VC host connection object."""
capabilities = {
"has_imagecache": True,
"supports_recreate": False,
}
# VMwareAPI has both ESXi and vCenter API sets.
# The ESXi API are a proper sub-set of the vCenter API.
# That is to say, nearly all valid ESXi calls are
# valid vCenter calls. There are some small edge-case
# exceptions regarding VNC, CIM, User management & SSO.
def _do_deprecation_warning(self):
LOG.warning(_('The VMware ESX driver is now deprecated and will be '
'removed in the Juno release. The VC driver will remain '
'and continue to be supported.'))
# The vCenter driver includes API that acts on ESX hosts or groups
# of ESX hosts in clusters or non-cluster logical-groupings.
#
# vCenter is not a hypervisor itself, it works with multiple
# hypervisor host machines and their guests. This fact can
# subtly alter how vSphere and OpenStack interoperate.
def __init__(self, virtapi, scheme="https"):
super(VMwareESXDriver, self).__init__(virtapi)
self._do_deprecation_warning()
self._host_ip = CONF.vmware.host_ip
if not (self._host_ip or CONF.vmware.host_username is None or
CONF.vmware.host_password is None):
raise Exception(_("Must specify host_ip, "
"host_username "
"and host_password to use "
"compute_driver=vmwareapi.VMwareESXDriver or "
"vmwareapi.VMwareVCDriver"))
super(VMwareVCDriver, self).__init__(virtapi)
self._datastore_regex = None
if CONF.vmware.datastore_regex:
@ -129,280 +128,11 @@ class VMwareESXDriver(driver.ComputeDriver):
% CONF.vmware.datastore_regex)
self._session = VMwareAPISession(scheme=scheme)
self._volumeops = volumeops.VMwareVolumeOps(self._session)
self._vmops = vmops.VMwareVMOps(self._session, self.virtapi,
self._volumeops,
datastore_regex=self._datastore_regex)
self._host = host.Host(self._session)
self._host_state = None
# TODO(hartsocks): back-off into a configuration test module.
if CONF.vmware.use_linked_clone is None:
raise error_util.UseLinkedCloneConfigurationFault()
@property
def host_state(self):
if not self._host_state:
self._host_state = host.HostState(self._session,
self._host_ip)
return self._host_state
def init_host(self, host):
vim = self._session.vim
if vim is None:
self._session._create_session()
def cleanup_host(self, host):
# NOTE(hartsocks): we lean on the init_host to force the vim object
# to not be None.
vim = self._session.vim
service_content = vim.get_service_content()
session_manager = service_content.sessionManager
try:
vim.client.service.Logout(session_manager)
except suds.WebFault:
LOG.debug("No vSphere session was open during cleanup_host.")
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
"""Destroy VM instance."""
# Destroy gets triggered when Resource Claim in resource_tracker
# is not successful. When resource claim is not successful,
# node is not set in instance. Perform destroy only if node is set
if not instance['node']:
return
self._vmops.destroy(instance, destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def suspend(self, instance):
"""Suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
"""Power off the specified instance."""
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
instances = self.list_instances()
if instance['uuid'] not in instances:
LOG.warn(_('Instance cannot be found in host, or in an unknown'
'state.'), instance=instance)
else:
state = vm_util.get_vm_state_from_name(self._session,
instance['uuid'])
ignored_states = ['poweredon', 'suspended']
if state.lower() in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self.reboot(context, instance, network_info, 'hard',
block_device_info)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def get_info(self, instance):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._vmops.get_diagnostics(instance)
return data
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._vmops.get_instance_diagnostics(instance)
return data
def get_vnc_console(self, context, instance):
"""Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
return self._volumeops.get_volume_connector(instance)
def get_host_ip_addr(self):
"""Retrieves the IP address of the ESX host."""
return self._host_ip
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance,
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
instance,
mountpoint)
def get_console_pool_info(self, console_type):
"""Get info about the host on which the VM resides."""
return {'address': CONF.vmware.host_ip,
'username': CONF.vmware.host_username,
'password': CONF.vmware.host_password}
def _get_available_resources(self, host_stats):
return {'vcpus': host_stats['vcpus'],
'memory_mb': host_stats['host_memory_total'],
'local_gb': host_stats['disk_total'],
'vcpus_used': 0,
'memory_mb_used': host_stats['host_memory_total'] -
host_stats['host_memory_free'],
'local_gb_used': host_stats['disk_used'],
'hypervisor_type': host_stats['hypervisor_type'],
'hypervisor_version': host_stats['hypervisor_version'],
'hypervisor_hostname': host_stats['hypervisor_hostname'],
'cpu_info': jsonutils.dumps(host_stats['cpu_info']),
'supported_instances': jsonutils.dumps(
host_stats['supported_instances']),
}
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:returns: dictionary describing resources
"""
host_stats = self.get_host_stats(refresh=True)
# Updating host information
return self._get_available_resources(host_stats)
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, run the update first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
return self._host.host_power_action(host, action)
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self._host.host_maintenance_mode(host, mode)
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
return self._host.set_host_enabled(host, enabled)
def get_host_uptime(self, host):
return 'Please refer to %s for the uptime' % CONF.vmware.host_ip
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, nw_info)
def list_instance_uuids(self):
"""List VM instance UUIDs."""
uuids = self._vmops.list_instances()
return [uuid for uuid in uuids if uuidutils.is_uuid_like(uuid)]
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self._vmops.manage_image_cache(context, all_instances)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
return self._vmops.instance_exists(instance)
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance."""
self._vmops.attach_interface(instance, image_meta, vif)
def detach_interface(self, instance, vif):
"""Detach an interface from the instance."""
self._vmops.detach_interface(instance, vif)
class VMwareVCDriver(VMwareESXDriver):
"""The VC host connection object."""
# The vCenter driver includes several additional VMware vSphere
# capabilities that include API that act on hosts or groups of
# hosts in clusters or non-cluster logical-groupings.
#
# vCenter is not a hypervisor itself, it works with multiple
# hypervisor host machines and their guests. This fact can
# subtly alter how vSphere and OpenStack interoperate.
def _do_deprecation_warning(self):
# Driver validated by VMware's Minesweeper CI
pass
def __init__(self, virtapi, scheme="https"):
super(VMwareVCDriver, self).__init__(virtapi, scheme)
# Get the list of clusters to be used
self._cluster_names = CONF.vmware.cluster_name
self.dict_mors = vm_util.get_all_cluster_refs_by_name(self._session,
@ -417,8 +147,8 @@ class VMwareVCDriver(VMwareESXDriver):
clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()]
missing_clusters = set(self._cluster_names) - set(clusters_found)
if missing_clusters:
LOG.warn(_("The following clusters could not be found in the"
" vCenter %s") % list(missing_clusters))
LOG.warn(_LW("The following clusters could not be found in the "
"vCenter %s") % list(missing_clusters))
# The _resources is used to maintain the vmops, volumeops and vcstate
# objects per cluster
@ -434,6 +164,38 @@ class VMwareVCDriver(VMwareESXDriver):
self._volumeops = self._resources.get(first_cluster).get('volumeops')
self._vc_state = self._resources.get(first_cluster).get('vcstate')
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
instances = self.list_instances()
if instance['uuid'] not in instances:
LOG.warn(_LW('Instance cannot be found in host, or in an unknown'
'state.'), instance=instance)
else:
state = vm_util.get_vm_state_from_name(self._session,
instance['uuid'])
ignored_states = ['poweredon', 'suspended']
if state.lower() in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self.reboot(context, instance, network_info, 'hard',
block_device_info)
def list_instance_uuids(self):
"""List VM instance UUIDs."""
uuids = self._vmops.list_instances()
return [uuid for uuid in uuids if uuidutils.is_uuid_like(uuid)]
def list_instances(self):
"""List VM instances from all nodes."""
instances = []
@ -492,9 +254,7 @@ class VMwareVCDriver(VMwareESXDriver):
def get_vnc_console(self, context, instance):
"""Return link to instance's VNC console using vCenter logic."""
# In this situation, ESXi and vCenter require different
# API logic to create a valid VNC console connection object.
# In specific, vCenter does not actually run the VNC service
# vCenter does not actually run the VNC service
# itself. You must talk to the VNC host underneath vCenter.
_vmops = self._get_vmops_for_compute_node(instance['node'])
return _vmops.get_vnc_console(instance)
@ -584,6 +344,22 @@ class VMwareVCDriver(VMwareESXDriver):
resource = self._get_resource_for_node(nodename)
return resource['vcstate']
def _get_available_resources(self, host_stats):
return {'vcpus': host_stats['vcpus'],
'memory_mb': host_stats['host_memory_total'],
'local_gb': host_stats['disk_total'],
'vcpus_used': 0,
'memory_mb_used': host_stats['host_memory_total'] -
host_stats['host_memory_free'],
'local_gb_used': host_stats['disk_used'],
'hypervisor_type': host_stats['hypervisor_type'],
'hypervisor_version': host_stats['hypervisor_version'],
'hypervisor_hostname': host_stats['hypervisor_hostname'],
'cpu_info': jsonutils.dumps(host_stats['cpu_info']),
'supported_instances': jsonutils.dumps(
host_stats['supported_instances']),
}
def get_available_resource(self, nodename):
"""Retrieve resource info.
@ -822,7 +598,7 @@ class VMwareVCDriver(VMwareESXDriver):
class VMwareAPISession(object):
"""Sets up a session with the VC/ESX host and handles all
"""Sets up a session with the VC host and handles all
the calls made to the host.
"""
@ -848,7 +624,7 @@ class VMwareAPISession(object):
port=self._host_port)
def _create_session(self):
"""Creates a session with the VC/ESX host."""
"""Creates a session with the VC host."""
delay = 1