Remove support for creating volumes from Nova.

The current Nova server volume support is broken. Nova has also
declared that the 'os-volumes_boot' API will be deprecated in the
future. As creating volumes through cinderclient has been supported
for a long time, we can simply drop support for Nova server volumes.

This patch also migrates to the new block_device_mapping_v2 parameter
of the Nova server-creation API.

Closes-Bug: #1673408
Change-Id: I74d86241a5a0d0b1804b959313432168f68faf89
Signed-off-by: Zhao Chao <zhaochao1984@gmail.com>
Zhao Chao 2018-02-08 23:05:19 +08:00
parent 9e5186b95b
commit 29362a18a4
7 changed files with 142 additions and 165 deletions
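
For context, a minimal sketch of the call pattern this patch moves to, assuming
authenticated cinder_client / nova_client handles and placeholder image and
flavor IDs (none of these names come from the patch itself): the volume is
created through Cinder and then handed to Nova as a block_device_mapping_v2
entry, mirroring the new _build_volume()/_create_server() path in the diff
below.

# Illustrative sketch only; cinder_client, nova_client, image_id and
# flavor_id are assumed to exist and are not part of this change.
image_id = 'IMAGE-UUID'
flavor_id = 'FLAVOR-ID'

# Create the data volume via Cinder (the real task-manager code polls
# until the volume reaches the 'available' state before proceeding).
volume = cinder_client.volumes.create(size=2, name='datastore-volume')

bdm_v2 = [{
    'uuid': volume.id,                  # the Cinder volume to attach
    'source_type': 'volume',
    'destination_type': 'volume',
    'device_name': '/dev/vdb',          # may be ignored by the libvirt driver
    'volume_size': volume.size,
    'delete_on_termination': True,
}]

# Boot the server and attach the volume in a single request.
server = nova_client.servers.create(
    'db-instance', image_id, flavor_id,
    block_device_mapping_v2=bdm_v2)
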


@@ -104,9 +104,6 @@ agent_call_low_timeout = 5
agent_call_high_timeout = 150
agent_replication_snapshot_timeout = 36000
# Whether to use nova's contrib api for create server with volume
use_nova_server_volume = False
# Config option for filtering the IP address that DNS uses
# For nova-network, set this to the appropriate network label defined in nova
# For neutron, set this to .* since users can specify custom network labels


@@ -92,7 +92,6 @@ agent_call_low_timeout = 5
agent_call_high_timeout = 150
server_delete_time_out=10
use_nova_server_volume = False
dns_time_out = 120
resize_time_out = 120
revert_time_out = 120


@@ -0,0 +1,7 @@
---
fixes:
- |
Remove support for creating volumes through Nova. The former configuration
option "use_nova_server_volume" is no longer used; volumes are now always
created through cinderclient.
Fixes bug #1673408.


@@ -220,9 +220,6 @@ common_opts = [
cfg.BoolOpt('use_nova_server_config_drive', default=True,
help='Use config drive for file injection when booting '
'instance.'),
cfg.BoolOpt('use_nova_server_volume', default=False,
help='Whether to provision a Cinder volume for the '
'Nova instance.'),
cfg.StrOpt('device_path', default='/dev/vdb',
help='Device path for volume if volume support is enabled.'),
cfg.StrOpt('default_datastore', default=None,


@@ -483,29 +483,17 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
files = self.get_injected_files(datastore_manager)
cinder_volume_type = volume_type or CONF.cinder_volume_type
if CONF.use_nova_server_volume:
volume_info = self._create_server_volume(
flavor['id'],
image_id,
security_groups,
datastore_manager,
volume_size,
availability_zone,
nics,
files,
scheduler_hints)
else:
volume_info = self._create_server_volume_individually(
flavor['id'],
image_id,
security_groups,
datastore_manager,
volume_size,
availability_zone,
nics,
files,
cinder_volume_type,
scheduler_hints)
volume_info = self._create_server_volume(
flavor['id'],
image_id,
security_groups,
datastore_manager,
volume_size,
availability_zone,
nics,
files,
cinder_volume_type,
scheduler_hints)
config = self._render_config(flavor)
@@ -730,51 +718,6 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
'srv_msg': server_message})
return False
def _create_server_volume(self, flavor_id, image_id, security_groups,
datastore_manager, volume_size,
availability_zone, nics, files,
scheduler_hints):
LOG.debug("Begin _create_server_volume for id: %s", self.id)
try:
userdata = self._prepare_userdata(datastore_manager)
name = self.hostname or self.name
volume_desc = ("datastore volume for %s" % self.id)
volume_name = ("datastore-%s" % self.id)
volume_ref = {'size': volume_size, 'name': volume_name,
'description': volume_desc}
config_drive = CONF.use_nova_server_config_drive
server = self.nova_client.servers.create(
name, image_id, flavor_id,
files=files, volume=volume_ref,
security_groups=security_groups,
availability_zone=availability_zone,
nics=nics, config_drive=config_drive,
userdata=userdata, scheduler_hints=scheduler_hints)
server_dict = server._info
LOG.debug("Created new compute instance %(server_id)s "
"for id: %(id)s\nServer response: %(response)s",
{'server_id': server.id, 'id': self.id,
'response': server_dict})
volume_id = None
for volume in server_dict.get('os:volumes', []):
volume_id = volume.get('id')
# Record the server ID and volume ID in case something goes wrong.
self.update_db(compute_instance_id=server.id, volume_id=volume_id)
except Exception as e:
log_fmt = "Error creating server and volume for instance %s"
exc_fmt = _("Error creating server and volume for instance %s")
LOG.debug("End _create_server_volume for id: %s", self.id)
err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
self._log_and_raise(e, log_fmt, exc_fmt, self.id, err)
device_path = self.device_path
mount_point = CONF.get(datastore_manager).mount_point
volume_info = {'device_path': device_path, 'mount_point': mount_point}
LOG.debug("End _create_server_volume for id: %s", self.id)
return volume_info
def _build_sg_rules_mapping(self, rule_ports):
final = []
cidr = CONF.trove_security_group_rule_cidr
@@ -785,22 +728,21 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
'to_': str(to_)})
return final
def _create_server_volume_individually(self, flavor_id, image_id,
security_groups, datastore_manager,
volume_size, availability_zone,
nics, files, volume_type,
scheduler_hints):
LOG.debug("Begin _create_server_volume_individually for id: %s",
self.id)
def _create_server_volume(self, flavor_id, image_id,
security_groups, datastore_manager,
volume_size, availability_zone,
nics, files, volume_type,
scheduler_hints):
LOG.debug("Begin _create_server_volume for id: %s", self.id)
server = None
volume_info = self._build_volume_info(datastore_manager,
volume_size=volume_size,
volume_type=volume_type)
block_device_mapping = volume_info['block_device']
block_device_mapping_v2 = volume_info['block_device']
try:
server = self._create_server(flavor_id, image_id, security_groups,
datastore_manager,
block_device_mapping,
block_device_mapping_v2,
availability_zone, nics, files,
scheduler_hints)
server_id = server.id
@@ -811,8 +753,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
exc_fmt = _("Failed to create server for instance %s")
err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
self._log_and_raise(e, log_fmt, exc_fmt, self.id, err)
LOG.debug("End _create_server_volume_individually for id: %s",
self.id)
LOG.debug("End _create_server_volume for id: %s", self.id)
return volume_info
def _build_volume_info(self, datastore_manager, volume_size=None,
@@ -842,7 +783,6 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
'block_device': None,
'device_path': device_path,
'mount_point': mount_point,
'volumes': None,
}
return volume_info
@@ -887,12 +827,25 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
def _build_volume(self, v_ref, datastore_manager):
LOG.debug("Created volume %s", v_ref)
# The mapping is in the format:
# <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>]
# setting the delete_on_terminate instance to true=1
mapping = "%s:%s:%s:%s" % (v_ref.id, '', v_ref.size, 1)
# TODO(zhaochao): since Liberty, the Nova libvirt driver no longer honors
# the user-supplied device name, so we may need to find a new method to
# make sure the volume is correctly mounted inside the guest; please
# refer to the 'intermezzo-problem-with-device-names' section of the
# Nova user reference at:
# https://docs.openstack.org/nova/latest/user/block-device-mapping.html
bdm = CONF.block_device_mapping
block_device = {bdm: mapping}
# use Nova block_device_mapping_v2, reference:
# https://developer.openstack.org/api-ref/compute/#create-server
# delete_on_termination is set to True so the volume is deleted with the instance
block_device_v2 = [{
"uuid": v_ref.id,
"source_type": "volume",
"destination_type": "volume",
"device_name": bdm,
"volume_size": v_ref.size,
"delete_on_termination": True
}]
created_volumes = [{'id': v_ref.id,
'size': v_ref.size}]
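
The TODO comment above notes that the libvirt driver may ignore the requested
device_name. As a guest-side illustration (an assumption-laden sketch, not part
of this patch), the volume could instead be located by its Cinder ID, since
virtio disks typically expose the first 20 characters of the volume UUID as
their serial under /dev/disk/by-id:

import os

def find_device_by_volume_id(volume_id):
    # udev usually creates /dev/disk/by-id/virtio-<serial>, where the serial
    # is the Cinder volume UUID truncated to 20 characters.
    candidate = '/dev/disk/by-id/virtio-%s' % volume_id[:20]
    if os.path.exists(candidate):
        return os.path.realpath(candidate)
    return None
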
@@ -903,15 +856,14 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
"volume = %(volume)s\n"
"device_path = %(path)s\n"
"mount_point = %(point)s",
{"device": block_device,
{"device": block_device_v2,
"volume": created_volumes,
"path": device_path,
"point": mount_point})
volume_info = {'block_device': block_device,
volume_info = {'block_device': block_device_v2,
'device_path': device_path,
'mount_point': mount_point,
'volumes': created_volumes}
'mount_point': mount_point}
return volume_info
def _prepare_userdata(self, datastore_manager):
@@ -924,17 +876,17 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
return userdata
def _create_server(self, flavor_id, image_id, security_groups,
datastore_manager, block_device_mapping,
datastore_manager, block_device_mapping_v2,
availability_zone, nics, files={},
scheduler_hints=None):
userdata = self._prepare_userdata(datastore_manager)
name = self.hostname or self.name
bdmap = block_device_mapping
bdmap_v2 = block_device_mapping_v2
config_drive = CONF.use_nova_server_config_drive
server = self.nova_client.servers.create(
name, image_id, flavor_id, files=files, userdata=userdata,
security_groups=security_groups, block_device_mapping=bdmap,
security_groups=security_groups, block_device_mapping_v2=bdmap_v2,
availability_zone=availability_zone, nics=nics,
config_drive=config_drive, scheduler_hints=scheduler_hints)
LOG.debug("Created new compute instance %(server_id)s "


@@ -101,7 +101,7 @@ class FakeServer(object):
next_local_id = 0
def __init__(self, parent, owner, id, name, image_id, flavor_ref,
block_device_mapping, volumes):
volumes):
self.owner = owner # This is a context.
self.id = id
self.parent = parent
@@ -266,30 +266,13 @@ class FakeServers(object):
server.owner.tenant == self.context.tenant)
def create(self, name, image_id, flavor_ref, files=None, userdata=None,
block_device_mapping=None, volume=None, security_groups=None,
block_device_mapping_v2=None, security_groups=None,
availability_zone=None, nics=None, config_drive=False,
scheduler_hints=None):
id = "FAKE_%s" % uuid.uuid4()
if volume:
volume = self.volumes.create(volume['size'], volume['name'],
volume['description'])
while volume.status == "BUILD":
eventlet.sleep(0.1)
if volume.status != "available":
LOG.info("volume status = %s", volume.status)
raise nova_exceptions.ClientException("Volume was bad!")
mapping = "%s::%s:%s" % (volume.id, volume.size, 1)
block_device_mapping = {'vdb': mapping}
volumes = [volume]
LOG.debug("Fake Volume Create %(volumeid)s with "
"status %(volumestatus)s",
{'volumeid': volume.id, 'volumestatus': volume.status})
else:
volumes = self._get_volumes_from_bdm(block_device_mapping)
for volume in volumes:
volume.schedule_status('in-use', 1)
volumes = self._get_volumes_from_bdm_v2(block_device_mapping_v2)
server = FakeServer(self, self.context, id, name, image_id, flavor_ref,
block_device_mapping, volumes)
volumes)
self.db[id] = server
if name.endswith('SERVER_ERROR'):
raise nova_exceptions.ClientException("Fake server create error.")
@@ -308,21 +291,15 @@ class FakeServers(object):
LOG.info("FAKE_SERVERS_DB : %s", str(FAKE_SERVERS_DB))
return server
def _get_volumes_from_bdm(self, block_device_mapping):
def _get_volumes_from_bdm_v2(self, block_device_mapping_v2):
volumes = []
if block_device_mapping is not None:
# block_device_mapping is a dictionary, where the key is the
# device name on the compute instance and the mapping info is a
# set of fields in a string, separated by colons.
# For each device, find the volume, and record the mapping info
# to another fake object and attach it to the volume
# so that the fake API can later retrieve this.
for device in block_device_mapping:
mapping = block_device_mapping[device]
(id, _type, size, delete_on_terminate) = mapping.split(":")
volume = self.volumes.get(id)
volume.mapping = FakeBlockDeviceMappingInfo(
id, device, _type, size, delete_on_terminate)
if block_device_mapping_v2 is not None:
# block_device_mapping_v2 is an array of dicts. Every dict is
# a volume attachment, which contains "uuid", "source_type",
# "destination_type", "device_name", "volume_size" and
# "delete_on_termination".
for bdm in block_device_mapping_v2:
volume = self.volumes.get(bdm['uuid'])
volumes.append(volume)
return volumes
@@ -337,12 +314,6 @@ class FakeServers(object):
else:
raise nova_exceptions.NotFound(404, "Bad permissions")
def get_server_volumes(self, server_id):
"""Fake method we've added to grab servers from the volume."""
return [volume.mapping
for volume in self.get(server_id).volumes
if volume.mapping is not None]
def list(self):
return [v for (k, v) in self.db.items() if self.can_see(v.id)]
@@ -390,25 +361,6 @@ class FakeRdServers(object):
return [FakeRdServer(server) for server in self.servers.list()]
class FakeServerVolumes(object):
def __init__(self, context):
self.context = context
def get_server_volumes(self, server_id):
class ServerVolumes(object):
def __init__(self, block_device_mapping):
LOG.debug("block_device_mapping = %s", block_device_mapping)
device = block_device_mapping['vdb']
(self.volumeId,
self.type,
self.size,
self.delete_on_terminate) = device.split(":")
fake_servers = FakeServers(self.context, FLAVORS)
server = fake_servers.get(server_id)
return [ServerVolumes(server.block_device_mapping)]
class FakeVolume(object):
def __init__(self, parent, owner, id, size, name,
@@ -846,9 +798,6 @@ class FakeClient(object):
self.security_group_rules = FakeSecurityGroupRules(context)
self.server_groups = FakeServerGroups(context)
def get_server_volumes(self, server_id):
return self.servers.get_server_volumes(server_id)
def rescan_server_volume(self, server, volume_id):
LOG.info("FAKE rescanning volume.")


@@ -81,13 +81,14 @@ class fake_Server(object):
self.files = None
self.userdata = None
self.security_groups = None
self.block_device_mapping = None
self.block_device_mapping_v2 = None
self.status = 'ACTIVE'
class fake_ServerManager(object):
def create(self, name, image_id, flavor_id, files, userdata,
security_groups, block_device_mapping, availability_zone=None,
security_groups, block_device_mapping_v2=None,
availability_zone=None,
nics=None, config_drive=False,
scheduler_hints=None):
server = fake_Server()
@@ -98,7 +99,7 @@ class fake_ServerManager(object):
server.files = files
server.userdata = userdata
server.security_groups = security_groups
server.block_device_mapping = block_device_mapping
server.block_device_mapping_v2 = block_device_mapping_v2
server.availability_zone = availability_zone
server.nics = nics
return server
@@ -297,6 +298,25 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest):
# verify
self.assertIsNotNone(server)
@patch.object(taskmanager_models.FreshInstanceTasks, 'hostname',
new_callable=PropertyMock,
return_value='fake-hostname')
def test_servers_create_block_device_mapping_v2(self, mock_hostname):
mock_nova_client = self.freshinstancetasks.nova_client = Mock()
mock_servers_create = mock_nova_client.servers.create
self.freshinstancetasks._create_server('fake-flavor', 'fake-image',
None, 'mysql', None, None,
None)
mock_servers_create.assert_called_with('fake-hostname', 'fake-image',
'fake-flavor', files={},
userdata=None,
security_groups=None,
block_device_mapping_v2=None,
availability_zone=None,
nics=None,
config_drive=True,
scheduler_hints=None)
@patch.object(InstanceServiceStatus, 'find_by',
return_value=fake_InstanceServiceStatus.find_by())
@patch.object(DBInstance, 'find_by',
@@ -375,6 +395,62 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest):
'mysql', mock_build_volume_info()['block_device'], None,
None, mock_get_injected_files(), {'group': 'sg-id'})
@patch.object(BaseInstance, 'update_db')
@patch.object(taskmanager_models, 'create_cinder_client')
@patch.object(taskmanager_models.FreshInstanceTasks, 'device_path',
new_callable=PropertyMock,
return_value='fake-device-path')
@patch.object(taskmanager_models.FreshInstanceTasks, 'volume_support',
new_callable=PropertyMock,
return_value=True)
def test_build_volume_info(self, mock_volume_support, mock_device_path,
mock_create_cinderclient, mock_update_db):
cfg.CONF.set_override('block_device_mapping', 'fake-bdm')
cfg.CONF.set_override('mount_point', 'fake-mount-point',
group='mysql')
mock_cinderclient = mock_create_cinderclient.return_value
mock_volume = Mock(name='fake-vol', id='fake-vol-id',
size=2, status='available')
mock_cinderclient.volumes.create.return_value = mock_volume
mock_cinderclient.volumes.get.return_value = mock_volume
volume_info = self.freshinstancetasks._build_volume_info(
'mysql', volume_size=2, volume_type='volume_type')
expected = {
'block_device': [{
'uuid': 'fake-vol-id',
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'fake-bdm',
'volume_size': 2,
'delete_on_termination': True}],
'device_path': 'fake-device-path',
'mount_point': 'fake-mount-point'
}
self.assertEqual(expected, volume_info)
@patch.object(BaseInstance, 'update_db')
@patch.object(taskmanager_models.FreshInstanceTasks, '_create_volume')
@patch.object(taskmanager_models.FreshInstanceTasks, 'device_path',
new_callable=PropertyMock,
return_value='fake-device-path')
@patch.object(taskmanager_models.FreshInstanceTasks, 'volume_support',
new_callable=PropertyMock,
return_value=False)
def test_build_volume_info_without_volume(self, mock_volume_support,
mock_device_path,
mock_create_volume,
mock_update_db):
cfg.CONF.set_override('mount_point', 'fake-mount-point',
group='mysql')
volume_info = self.freshinstancetasks._build_volume_info('mysql')
self.assertFalse(mock_create_volume.called)
expected = {
'block_device': None,
'device_path': 'fake-device-path',
'mount_point': 'fake-mount-point'
}
self.assertEqual(expected, volume_info)
@patch.object(trove.guestagent.api.API, 'attach_replication_slave')
@patch.object(rpc, 'get_client')
@patch.object(DBInstance, 'get_by')