sync: pick up some small changes from upstream

* driver.get_info received a new argument, which we don't use.
  The good news is that the manager catches the resulting TypeError
  and falls back to calling the driver without the new argument, so
  accepting (and ignoring) it is safe.
* driver.extend_volume now accepts the new volume size
* the driver shouldn't set the 'is_public' image property when
  taking snapshots. This is already handled outside the driver
* missing whitespace between words in log message
* avoid using utils.execute, use processutils.execute
* update os_win utils "auto-spec" helper (we're relying a bit too
  much on os-win internals, which meanwhile have changed)
* nova dropped the helper method that was merging allocations so
  we'll have to include it in compute_hyperv. Note that we only
  use it for the cluster driver.

Change-Id: I0b59a118764421ec9daba3f3732f45ec9cb7287b
This commit is contained in:
Lucian Petrut 2019-03-25 10:01:44 +02:00
parent 87f453764f
commit 05a55ce5c9
10 changed files with 85 additions and 21 deletions

View File

@ -201,7 +201,7 @@ class HyperVDriver(driver.ComputeDriver):
"""Cleanup after instance being destroyed by Hypervisor."""
self.unplug_vifs(instance, network_info)
def get_info(self, instance):
def get_info(self, instance, use_cache=True):
return self._vmops.get_info(instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
@ -221,7 +221,7 @@ class HyperVDriver(driver.ComputeDriver):
instance,
update_device_metadata=True)
def extend_volume(self, connection_info, instance):
def extend_volume(self, connection_info, instance, requested_size):
self._volumeops.extend_volume(connection_info)
def get_volume_connector(self, instance):

View File

@ -69,7 +69,7 @@ class SerialProxy(threading.Thread):
self._sock.listen(1)
except socket.error as err:
self._sock.close()
msg = (_('Failed to initialize serial proxy on'
msg = (_('Failed to initialize serial proxy on '
'%(addr)s:%(port)s, handling connections '
'to instance %(instance_name)s. Error: %(error)s') %
{'addr': self._addr,

View File

@ -43,8 +43,7 @@ class SnapshotOps(object):
(glance_image_service,
image_id) = glance.get_remote_image_service(context, image_id)
image_metadata = {"is_public": False,
"disk_format": image_format,
image_metadata = {"disk_format": image_format,
"container_format": "bare"}
with self._pathutils.open(image_vhd_path, 'rb') as f:
glance_image_service.update(context, image_id, image_metadata, f,

View File

@ -16,7 +16,6 @@
from nova import exception
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
@ -65,7 +64,7 @@ class PlacementUtils(object):
LOG.info("Merging existing allocations for consumer %s on "
"provider %s: %s.",
consumer_uuid, new_rp_uuid, allocations)
scheduler_utils.merge_resources(
self.merge_resources(
allocations[new_rp_uuid]['resources'],
allocations[old_rp_uuid]['resources'])
else:
@ -110,3 +109,20 @@ class PlacementUtils(object):
consumer_uuid=consumer, error=resp.text)
return resp.json()
@staticmethod
def merge_resources(original_resources, new_resources, sign=1):
"""Merge a list of new resources with existing resources.
Either add the resources (if sign is 1) or subtract (if sign is -1).
If the resulting value is 0 do not include the resource in the results.
"""
all_keys = set(original_resources.keys()) | set(new_resources.keys())
for key in all_keys:
value = (original_resources.get(key, 0) +
(sign * new_resources.get(key, 0)))
if value:
original_resources[key] = value
else:
original_resources.pop(key, None)

View File

@ -794,15 +794,16 @@ class VMOps(object):
if not CONF.hyperv.config_drive_cdrom:
configdrive_path = self._pathutils.get_configdrive_path(
instance.name, constants.DISK_FORMAT_VHD, rescue=rescue)
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
processutils.execute(
CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
self._pathutils.remove(configdrive_path_iso)
else:
configdrive_path = configdrive_path_iso

View File

@ -39,7 +39,7 @@ class HyperVBaseTestCase(test.NoDBTestCase):
@staticmethod
def _mock_get_class(class_type, *args, **kwargs):
existing_classes = utilsfactory.utils_map[class_type]
class_info = list(existing_classes.values())[0]
class_info = existing_classes[0]
imported_class = importutils.import_class(class_info['path'])
return mock.Mock(autospec=imported_class)

View File

@ -239,7 +239,8 @@ class HyperVDriverTestCase(test_base.HyperVBaseTestCase):
def test_extend_volume(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self.driver.extend_volume(
mock.sentinel.connection_info, mock_instance)
mock.sentinel.connection_info, mock_instance,
mock.sentinel.requested_size)
self.driver._volumeops.extend_volume.assert_called_once_with(
mock.sentinel.connection_info)

View File

@ -43,8 +43,7 @@ class SnapshotOpsTestCase(test_base.HyperVBaseTestCase):
@mock.patch('nova.image.glance.get_remote_image_service')
def test_save_glance_image(self, mock_get_remote_image_service):
fake_fmt = 'fake_fmt'
image_metadata = {"is_public": False,
"disk_format": fake_fmt,
image_metadata = {"disk_format": fake_fmt,
"container_format": "bare"}
glance_image_service = mock.MagicMock()
self._vhdutils.get_vhd_format.return_value = fake_fmt.upper()

View File

@ -1128,7 +1128,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
@mock.patch('nova.api.metadata.base.InstanceMetadata')
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder')
@mock.patch('nova.utils.execute')
@mock.patch('oslo_concurrency.processutils.execute')
def _test_create_config_drive(self, mock_execute, mock_ConfigDriveBuilder,
mock_InstanceMetadata, config_drive_format,
config_drive_cdrom, side_effect,

View File

@ -156,3 +156,51 @@ class PlacementUtilsTestCase(test_base.HyperVBaseTestCase):
exception.ConsumerAllocationRetrievalFailed,
self.placement._get_allocs_for_consumer,
self.context, mock.sentinel.consumer, mock.sentinel.version)
def test_merge_resources(self):
resources = {
'VCPU': 1, 'MEMORY_MB': 1024,
}
new_resources = {
'VCPU': 2, 'MEMORY_MB': 2048, 'CUSTOM_FOO': 1,
}
doubled = {
'VCPU': 3, 'MEMORY_MB': 3072, 'CUSTOM_FOO': 1,
}
saved_orig = dict(resources)
self.placement.merge_resources(resources, new_resources)
# Check to see that we've doubled our resources
self.assertEqual(doubled, resources)
# and then removed those doubled resources
self.placement.merge_resources(resources, saved_orig, -1)
self.assertEqual(new_resources, resources)
def test_merge_resources_zero(self):
# Test 0 value resources are ignored.
resources = {
'VCPU': 1, 'MEMORY_MB': 1024,
}
new_resources = {
'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 0,
}
# The result should not include the zero valued resource.
doubled = {
'VCPU': 3, 'MEMORY_MB': 3072,
}
self.placement.merge_resources(resources, new_resources)
self.assertEqual(doubled, resources)
def test_merge_resources_original_zeroes(self):
# Confirm that merging that result in a zero in the original
# excludes the zeroed resource class.
resources = {
'VCPU': 3, 'MEMORY_MB': 1023, 'DISK_GB': 1,
}
new_resources = {
'VCPU': 1, 'MEMORY_MB': 512, 'DISK_GB': 1,
}
merged = {
'VCPU': 2, 'MEMORY_MB': 511,
}
self.placement.merge_resources(resources, new_resources, -1)
self.assertEqual(merged, resources)