VMware: Remove host argument to ds_util.get_datastore()
Since the removal of the ESX driver, we no longer pass host to ds_util.get_datastore(), and we always pass a cluster. This change removes the host argument, and assumes we always get a cluster. This is a relatively simple change, but it has 2 knock-on effects to tests. Firstly, almost all tests using get_datastore were passing host and cluster as None, None, which exercised a code path which only made a single vim call. This meant that passing in fake.FakeObjectRetrievalSession() worked well. That code path was removed (because it was never actually called), and the new code path makes either 2 or 3 vim calls. This requires more complex faking of vim calls, which we do in _mock_get_datastore_calls(). Note that this is only faking the vim calls, so we're still testing get_datastore, not the fake function. The second knock-on effect is to the incredibly fragile test_spawn_mask_block_device_info_password in test_vmops. This test has to allow enough of spawn() to run to generate a debug log message, as it is testing that log message. This change caused an irrelevant exception to be raised in _get_vm_config_info, which prevented it getting as far as the log message. We poke the test a bit so it doesn't break any more. Finally, we entirely remove test_get_datastore_no_host_in_cluster because: 1. the previous code did 'if datastore_ret:' so returning '' and None, as returned by test_get_datastore_without_datastore, were previously functionally identical. Incidentally, this change also fixes that. 2. its purpose wasn't clear anyway. Change-Id: I37f1ea126ae2e7f86cfd29d399fba1c527f18dc0
This commit is contained in:
@@ -161,6 +161,51 @@ class DsUtilTestCase(test.NoDBTestCase):
|
||||
'fake-browser', ds_path, 'fake-file')
|
||||
self.assertFalse(file_exists)
|
||||
|
||||
def _mock_get_datastore_calls(self, *datastores):
    """Mock the vim_util calls made by ds_util.get_datastore().

    Returns a mock.patch context manager which replaces
    self.session._call_method with a fake that recognises the three vim
    calls get_datastore() is expected to make, and fails the test on any
    other call.

    :param datastores: successive datastore result sets to return from
        the property-collector calls, in the order given.
    """
    # Holds the iterator over *datastores between successive fake calls.
    # A one-element list so the closure below can rebind it.
    datastores_i = [None]

    # For the moment, at least, this list of datastores is simply passed to
    # get_properties_for_a_collection_of_objects, which we mock below. We
    # don't need to over-complicate the fake function by worrying about its
    # contents.
    fake_ds_list = ['fake-ds']

    def fake_call_method(module, method, *args, **kwargs):
        # Mock the call which returns a list of datastores for the cluster
        if (module == ds_util.vim_util and
                method == 'get_dynamic_property' and
                args == ('fake-cluster', 'ClusterComputeResource',
                         'datastore')):
            fake_ds_mor = fake.DataObject()
            fake_ds_mor.ManagedObjectReference = fake_ds_list
            return fake_ds_mor

        # Return the datastore result sets we were passed in, in the order
        # given
        if (module == ds_util.vim_util and
                method == 'get_properties_for_a_collection_of_objects' and
                args[0] == 'Datastore' and
                args[1] == fake_ds_list):
            # Start a new iterator over given datastores.
            datastores_i[0] = iter(datastores)
            # Use the next() builtin rather than the Python 2-only
            # iterator.next() method so this also runs on Python 3.
            return next(datastores_i[0])

        # Continue returning results from the current iterator.
        if (module == ds_util.vim_util and
                method == 'continue_to_get_objects'):
            try:
                return next(datastores_i[0])
            except StopIteration:
                # No more result sets: get_datastore treats None as
                # end-of-results.
                return None

        # Sentinel that get_datastore's use of vim has changed
        self.fail('Unexpected vim call in get_datastore: %s' % method)

    return mock.patch.object(self.session, '_call_method',
                             side_effect=fake_call_method)
|
||||
|
||||
def test_get_datastore(self):
|
||||
fake_objects = fake.FakeRetrieveResult()
|
||||
fake_objects.add_object(fake.Datastore())
|
||||
@@ -168,9 +213,9 @@ class DsUtilTestCase(test.NoDBTestCase):
|
||||
False, "normal"))
|
||||
fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000,
|
||||
True, "inMaintenance"))
|
||||
result = ds_util.get_datastore(
|
||||
fake.FakeObjectRetrievalSession(fake_objects))
|
||||
|
||||
with self._mock_get_datastore_calls(fake_objects):
|
||||
result = ds_util.get_datastore(self.session, 'fake-cluster')
|
||||
self.assertEqual("fake-ds", result.name)
|
||||
self.assertEqual(units.Ti, result.capacity)
|
||||
self.assertEqual(500 * units.Gi, result.freespace)
|
||||
@@ -182,9 +227,10 @@ class DsUtilTestCase(test.NoDBTestCase):
|
||||
fake_objects.add_object(fake.Datastore("openstack-ds0"))
|
||||
fake_objects.add_object(fake.Datastore("fake-ds0"))
|
||||
fake_objects.add_object(fake.Datastore("fake-ds1"))
|
||||
result = ds_util.get_datastore(
|
||||
fake.FakeObjectRetrievalSession(fake_objects), None, None,
|
||||
datastore_valid_regex)
|
||||
|
||||
with self._mock_get_datastore_calls(fake_objects):
|
||||
result = ds_util.get_datastore(self.session, 'fake-cluster',
|
||||
datastore_valid_regex)
|
||||
self.assertEqual("openstack-ds0", result.name)
|
||||
|
||||
def test_get_datastore_with_token(self):
|
||||
@@ -196,8 +242,9 @@ class DsUtilTestCase(test.NoDBTestCase):
|
||||
fake1 = fake.FakeRetrieveResult()
|
||||
fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
|
||||
fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))
|
||||
result = ds_util.get_datastore(
|
||||
fake.FakeObjectRetrievalSession(fake0, fake1), None, None, regex)
|
||||
|
||||
with self._mock_get_datastore_calls(fake0, fake1):
|
||||
result = ds_util.get_datastore(self.session, 'fake-cluster', regex)
|
||||
self.assertEqual("ds2", result.name)
|
||||
|
||||
def test_get_datastore_with_list(self):
|
||||
@@ -207,9 +254,10 @@ class DsUtilTestCase(test.NoDBTestCase):
|
||||
fake_objects.add_object(fake.Datastore("openstack-ds0"))
|
||||
fake_objects.add_object(fake.Datastore("openstack-ds1"))
|
||||
fake_objects.add_object(fake.Datastore("openstack-ds2"))
|
||||
result = ds_util.get_datastore(
|
||||
fake.FakeObjectRetrievalSession(fake_objects), None, None,
|
||||
datastore_valid_regex)
|
||||
|
||||
with self._mock_get_datastore_calls(fake_objects):
|
||||
result = ds_util.get_datastore(self.session, 'fake-cluster',
|
||||
datastore_valid_regex)
|
||||
self.assertNotEqual("openstack-ds1", result.name)
|
||||
|
||||
def test_get_datastore_with_regex_error(self):
|
||||
@@ -224,9 +272,9 @@ class DsUtilTestCase(test.NoDBTestCase):
|
||||
# assertRaisesRegExp would have been a good choice instead of
|
||||
# try/catch block, but it's available only from Py 2.7.
|
||||
try:
|
||||
ds_util.get_datastore(
|
||||
fake.FakeObjectRetrievalSession(fake_objects), None, None,
|
||||
datastore_invalid_regex)
|
||||
with self._mock_get_datastore_calls(fake_objects):
|
||||
ds_util.get_datastore(self.session, 'fake-cluster',
|
||||
datastore_invalid_regex)
|
||||
except exception.DatastoreNotFound as e:
|
||||
self.assertEqual(exp_message, e.args[0])
|
||||
else:
|
||||
@@ -234,20 +282,10 @@ class DsUtilTestCase(test.NoDBTestCase):
|
||||
"message: %s" % exp_message)
|
||||
|
||||
def test_get_datastore_without_datastore(self):
|
||||
|
||||
self.assertRaises(exception.DatastoreNotFound,
|
||||
ds_util.get_datastore,
|
||||
fake.FakeObjectRetrievalSession(None), host="fake-host")
|
||||
|
||||
self.assertRaises(exception.DatastoreNotFound,
|
||||
ds_util.get_datastore,
|
||||
fake.FakeObjectRetrievalSession(None), cluster="fake-cluster")
|
||||
|
||||
def test_get_datastore_no_host_in_cluster(self):
|
||||
self.assertRaises(exception.DatastoreNotFound,
|
||||
ds_util.get_datastore,
|
||||
fake.FakeObjectRetrievalSession(""), 'fake_cluster')
|
||||
|
||||
def test_get_datastore_inaccessible_ds(self):
|
||||
data_store = fake.Datastore()
|
||||
data_store.set("summary.accessible", False)
|
||||
@@ -255,9 +293,10 @@ class DsUtilTestCase(test.NoDBTestCase):
|
||||
fake_objects = fake.FakeRetrieveResult()
|
||||
fake_objects.add_object(data_store)
|
||||
|
||||
self.assertRaises(exception.DatastoreNotFound,
|
||||
ds_util.get_datastore,
|
||||
fake.FakeObjectRetrievalSession(fake_objects))
|
||||
with self._mock_get_datastore_calls(fake_objects):
|
||||
self.assertRaises(exception.DatastoreNotFound,
|
||||
ds_util.get_datastore,
|
||||
self.session, 'fake-cluster')
|
||||
|
||||
def test_get_datastore_ds_in_maintenance(self):
|
||||
data_store = fake.Datastore()
|
||||
@@ -266,9 +305,10 @@ class DsUtilTestCase(test.NoDBTestCase):
|
||||
fake_objects = fake.FakeRetrieveResult()
|
||||
fake_objects.add_object(data_store)
|
||||
|
||||
self.assertRaises(exception.DatastoreNotFound,
|
||||
ds_util.get_datastore,
|
||||
fake.FakeObjectRetrievalSession(fake_objects))
|
||||
with self._mock_get_datastore_calls(fake_objects):
|
||||
self.assertRaises(exception.DatastoreNotFound,
|
||||
ds_util.get_datastore,
|
||||
self.session, 'fake-cluster')
|
||||
|
||||
def _test_is_datastore_valid(self, accessible=True,
|
||||
maintenance_mode="normal",
|
||||
|
||||
@@ -512,10 +512,11 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
|
||||
vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
|
||||
|
||||
@mock.patch.object(vmops.LOG, 'debug')
|
||||
@mock.patch('nova.virt.vmwareapi.volumeops.VMwareVolumeOps'
|
||||
'.attach_root_volume')
|
||||
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
|
||||
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
|
||||
def test_spawn_mask_block_device_info_password(self,
|
||||
mock_attach_root_volume,
|
||||
mock_build_virtual_machine,
|
||||
mock_get_vm_config_info,
|
||||
mock_debug):
|
||||
# Very simple test that just ensures block_device_info auth_password
|
||||
# is masked when logged; the rest of the test just fails out early.
|
||||
@@ -534,18 +535,19 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
|
||||
|
||||
mock_debug.side_effect = fake_debug
|
||||
self.flags(flat_injected=False, vnc_enabled=False)
|
||||
mock_attach_root_volume.side_effect = Exception
|
||||
|
||||
# Call spawn(). We don't care what it does as long as it generates
|
||||
# the log message, which we check below.
|
||||
try:
|
||||
self._vmops.spawn(
|
||||
self._context, self._instance, {},
|
||||
injected_files=None, admin_password=None,
|
||||
network_info=[], block_device_info=bdi
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
with mock.patch.object(self._vmops, '_volumeops') as mock_volumeops:
|
||||
mock_volumeops.attach_root_volume.side_effect = Exception
|
||||
try:
|
||||
self._vmops.spawn(
|
||||
self._context, self._instance, {},
|
||||
injected_files=None, admin_password=None,
|
||||
network_info=[], block_device_info=bdi
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Check that the relevant log message was generated, and therefore
|
||||
# that we checked it was scrubbed
|
||||
|
||||
@@ -235,36 +235,24 @@ def _is_datastore_valid(propdict, datastore_regex):
|
||||
datastore_regex.match(propdict['summary.name'])))
|
||||
|
||||
|
||||
def get_datastore(session, cluster=None, host=None, datastore_regex=None):
|
||||
def get_datastore(session, cluster, datastore_regex=None):
|
||||
"""Get the datastore list and choose the most preferable one."""
|
||||
if cluster is None and host is None:
|
||||
data_stores = session._call_method(vim_util, "get_objects",
|
||||
"Datastore", ["summary.type", "summary.name",
|
||||
"summary.capacity", "summary.freeSpace",
|
||||
"summary.accessible",
|
||||
"summary.maintenanceMode"])
|
||||
else:
|
||||
if cluster is not None:
|
||||
datastore_ret = session._call_method(
|
||||
vim_util,
|
||||
"get_dynamic_property", cluster,
|
||||
"ClusterComputeResource", "datastore")
|
||||
else:
|
||||
datastore_ret = session._call_method(
|
||||
vim_util,
|
||||
"get_dynamic_property", host,
|
||||
"HostSystem", "datastore")
|
||||
datastore_ret = session._call_method(
|
||||
vim_util,
|
||||
"get_dynamic_property", cluster,
|
||||
"ClusterComputeResource", "datastore")
|
||||
if datastore_ret is None:
|
||||
raise exception.DatastoreNotFound()
|
||||
|
||||
data_store_mors = datastore_ret.ManagedObjectReference
|
||||
data_stores = session._call_method(vim_util,
|
||||
"get_properties_for_a_collection_of_objects",
|
||||
"Datastore", data_store_mors,
|
||||
["summary.type", "summary.name",
|
||||
"summary.capacity", "summary.freeSpace",
|
||||
"summary.accessible",
|
||||
"summary.maintenanceMode"])
|
||||
|
||||
if not datastore_ret:
|
||||
raise exception.DatastoreNotFound()
|
||||
data_store_mors = datastore_ret.ManagedObjectReference
|
||||
data_stores = session._call_method(vim_util,
|
||||
"get_properties_for_a_collection_of_objects",
|
||||
"Datastore", data_store_mors,
|
||||
["summary.type", "summary.name",
|
||||
"summary.capacity", "summary.freeSpace",
|
||||
"summary.accessible",
|
||||
"summary.maintenanceMode"])
|
||||
best_match = None
|
||||
while data_stores:
|
||||
best_match = _select_datastore(data_stores, best_match,
|
||||
|
||||
@@ -377,7 +377,7 @@ class VMwareVMOps(object):
|
||||
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
|
||||
reason=reason)
|
||||
datastore = ds_util.get_datastore(
|
||||
self._session, self._cluster, None, self._datastore_regex)
|
||||
self._session, self._cluster, self._datastore_regex)
|
||||
dc_info = self.get_datacenter_ref_and_name(datastore.ref)
|
||||
|
||||
return VirtualMachineInstanceConfigInfo(instance,
|
||||
@@ -1014,7 +1014,7 @@ class VMwareVMOps(object):
|
||||
total_steps=RESIZE_TOTAL_STEPS)
|
||||
|
||||
ds_ref = ds_util.get_datastore(
|
||||
self._session, self._cluster, host_ref,
|
||||
self._session, self._cluster,
|
||||
datastore_regex=self._datastore_regex).ref
|
||||
dc_info = self.get_datacenter_ref_and_name(ds_ref)
|
||||
# 3. Clone the VM for instance
|
||||
|
||||
Reference in New Issue
Block a user