Return node (name or uuid) with server instead of node_uuid
Currently we support listing compute nodes and aggregate nodes by name, but server show always returns the node UUID, which makes admins have to go to Ironic to look up the relationship between name and UUID; so this change returns the node name instead. Change-Id: Ia0e5122aa29c4de2e870c5b71c420de49530ba92 Closes-Bug: #1715036
This commit is contained in:
parent
1094e56a45
commit
5124ae1d44
@ -538,9 +538,9 @@ nics:
|
||||
in: body
|
||||
required: true
|
||||
type: object
|
||||
node_uuid:
|
||||
node:
|
||||
description: |
|
||||
The UUID of the node which our server associated with. Only visible for admin users.
|
||||
The name of the node which our server associated with. Only visible for admin users.
|
||||
in: body
|
||||
required: false
|
||||
type: string
|
||||
|
@ -61,6 +61,7 @@ Response
|
||||
- image_uuid: imageRef
|
||||
- availability_zone: availability_zone
|
||||
- addresses: addresses
|
||||
- node: node
|
||||
- links: links
|
||||
- uuid: server_uuid
|
||||
- status: server_status
|
||||
@ -212,6 +213,7 @@ Response
|
||||
- image_uuid: imageRef
|
||||
- availability_zone: availability_zone
|
||||
- addresses: addresses
|
||||
- node: node
|
||||
- links: links
|
||||
- uuid: server_uuid
|
||||
- status: server_status
|
||||
@ -264,7 +266,7 @@ Response
|
||||
- image_uuid: imageRef
|
||||
- availability_zone: availability_zone
|
||||
- addresses: addresses
|
||||
- node_uuid: node_uuid
|
||||
- node: node
|
||||
- links: links
|
||||
- uuid: server_uuid
|
||||
- status: server_status
|
||||
@ -328,6 +330,7 @@ Response
|
||||
- image_uuid: imageRef
|
||||
- availability_zone: availability_zone
|
||||
- addresses: addresses
|
||||
- node: node
|
||||
- links: links
|
||||
- uuid: server_uuid
|
||||
- status: server_status
|
||||
|
@ -46,7 +46,7 @@ import re
|
||||
_DEFAULT_SERVER_RETURN_FIELDS = ('uuid', 'name', 'description',
|
||||
'status', 'power_state')
|
||||
|
||||
_ONLY_ADMIN_VISIBLE_SEVER_FIELDS = ('node_uuid', 'affinity_zone',)
|
||||
_ONLY_ADMIN_VISIBLE_SEVER_FIELDS = ('node', 'affinity_zone',)
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
@ -447,8 +447,8 @@ class Server(base.APIBase):
|
||||
fault = {wtypes.text: types.jsontype}
|
||||
"""The fault of the server"""
|
||||
|
||||
node_uuid = types.uuid
|
||||
"""The node UUID of the server"""
|
||||
node = wtypes.text
|
||||
"""The backend node of the server"""
|
||||
|
||||
affinity_zone = wtypes.text
|
||||
"""The affinity zone of the server"""
|
||||
|
@ -52,18 +52,18 @@ class BaseEngineDriver(object):
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def set_power_state(self, context, node_uuid, state):
|
||||
def set_power_state(self, context, node, state):
|
||||
"""Set a node's power state.
|
||||
|
||||
:param node_uuid: node id to change power state.
|
||||
:param node: node name or id to change power state.
|
||||
:param state: mogan states to change to.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_ports_from_node(self, node_uuid, detail=True):
|
||||
def get_ports_from_node(self, node, detail=True):
|
||||
"""Get a node's ports info.
|
||||
|
||||
:param node_uuid: node id to get ports info.
|
||||
:param node: node name or id to get ports info.
|
||||
:param detail: whether to get detailed info of all the ports,
|
||||
default to False.
|
||||
"""
|
||||
|
@ -88,9 +88,9 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
super(IronicDriver, self).__init__()
|
||||
self.ironicclient = ironic.IronicClientWrapper()
|
||||
|
||||
def _get_node(self, node_uuid):
|
||||
"""Get a node by its UUID."""
|
||||
return self.ironicclient.call('node.get', node_uuid,
|
||||
def _get_node(self, node):
|
||||
"""Get a node by its name."""
|
||||
return self.ironicclient.call('node.get', node,
|
||||
fields=_NODE_FIELDS)
|
||||
|
||||
def _validate_server_and_node(self, server):
|
||||
@ -215,16 +215,16 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
|
||||
_log_ironic_polling(message, node, server)
|
||||
|
||||
def get_ports_from_node(self, node_uuid, detail=True):
|
||||
def get_ports_from_node(self, node, detail=True):
|
||||
"""List the MAC addresses and the port types from a node."""
|
||||
ports = self.ironicclient.call("node.list_ports",
|
||||
node_uuid, detail=detail)
|
||||
portgroups = self.ironicclient.call("portgroup.list", node=node_uuid,
|
||||
node, detail=detail)
|
||||
portgroups = self.ironicclient.call("portgroup.list", node=node,
|
||||
detail=detail)
|
||||
return ports + portgroups
|
||||
|
||||
def plug_vif(self, node_uuid, port_id):
|
||||
self.ironicclient.call("node.vif_attach", node_uuid, port_id)
|
||||
def plug_vif(self, node, port_id):
|
||||
self.ironicclient.call("node.vif_attach", node, port_id)
|
||||
|
||||
def unplug_vif(self, context, server, port_id):
|
||||
LOG.debug("unplug: server_uuid=%(uuid)s vif=%(server_nics)s "
|
||||
@ -232,7 +232,7 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
{'uuid': server.uuid,
|
||||
'server_nics': str(server.nics),
|
||||
'port_id': port_id})
|
||||
node = self._get_node(server.node_uuid)
|
||||
node = self._get_node(server.node)
|
||||
self._unplug_vif(node, port_id)
|
||||
|
||||
def _unplug_vif(self, node, port_id):
|
||||
@ -255,30 +255,30 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
|
||||
# The engine manager is meant to know the node uuid, so missing uuid
|
||||
# is a significant issue. It may mean we've been passed the wrong data.
|
||||
node_uuid = server.node_uuid
|
||||
if not node_uuid:
|
||||
node_ident = server.node
|
||||
if not node_ident:
|
||||
raise ironic_exc.BadRequest(
|
||||
_("Ironic node uuid not supplied to "
|
||||
"driver for server %s.") % server.uuid)
|
||||
|
||||
# add server info to node
|
||||
node = self._get_node(node_uuid)
|
||||
node = self._get_node(node_ident)
|
||||
self._add_server_info_to_node(node, server)
|
||||
|
||||
# validate we are ready to do the deploy
|
||||
validate_chk = self.ironicclient.call("node.validate", node_uuid)
|
||||
validate_chk = self.ironicclient.call("node.validate", node_ident)
|
||||
if (not validate_chk.deploy.get('result')
|
||||
or not validate_chk.power.get('result')):
|
||||
raise exception.ValidationError(_(
|
||||
"Ironic node: %(id)s failed to validate."
|
||||
" (deploy: %(deploy)s, power: %(power)s)")
|
||||
% {'id': server.node_uuid,
|
||||
% {'id': server.node,
|
||||
'deploy': validate_chk.deploy,
|
||||
'power': validate_chk.power})
|
||||
|
||||
# trigger the node deploy
|
||||
try:
|
||||
self.ironicclient.call("node.set_provision_state", node_uuid,
|
||||
self.ironicclient.call("node.set_provision_state", node_ident,
|
||||
ironic_states.ACTIVE,
|
||||
configdrive=configdrive_value)
|
||||
except Exception as e:
|
||||
@ -300,7 +300,7 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
LOG.error("Error deploying server %(server)s on "
|
||||
"baremetal node %(node)s.",
|
||||
{'server': server.uuid,
|
||||
'node': node_uuid})
|
||||
'node': node_ident})
|
||||
|
||||
def _unprovision(self, server, node):
|
||||
"""This method is called from destroy() to unprovision
|
||||
@ -524,14 +524,14 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
"""
|
||||
LOG.debug('Rebuild called for server', server=server)
|
||||
|
||||
node_uuid = server.node_uuid
|
||||
node = self._get_node(node_uuid)
|
||||
node_ident = server.node_ident
|
||||
node = self._get_node(node_ident)
|
||||
self._add_server_info_to_node(node, server)
|
||||
|
||||
# trigger the node rebuild
|
||||
try:
|
||||
self.ironicclient.call("node.set_provision_state",
|
||||
node_uuid,
|
||||
node_ident,
|
||||
ironic_states.REBUILD)
|
||||
except (ironic_exc.InternalServerError,
|
||||
ironic_exc.BadRequest) as e:
|
||||
@ -561,17 +561,17 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
for the server
|
||||
"""
|
||||
node = self._validate_server_and_node(server)
|
||||
node_uuid = node.uuid
|
||||
node_ident = node.uuid
|
||||
|
||||
def _get_console():
|
||||
"""Request ironicclient to acquire node console."""
|
||||
try:
|
||||
return self.ironicclient.call('node.get_console', node_uuid)
|
||||
return self.ironicclient.call('node.get_console', node_ident)
|
||||
except (ironic_exc.InternalServerError,
|
||||
ironic_exc.BadRequest) as e:
|
||||
LOG.error('Failed to acquire console information for '
|
||||
'node %(server)s: %(reason)s',
|
||||
{'server': node_uuid,
|
||||
{'server': node_ident,
|
||||
'reason': e})
|
||||
raise exception.ConsoleNotAvailable()
|
||||
|
||||
@ -590,13 +590,13 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
"""Request ironicclient to enable/disable node console."""
|
||||
try:
|
||||
self.ironicclient.call(
|
||||
'node.set_console_mode', node_uuid, mode)
|
||||
'node.set_console_mode', node_ident, mode)
|
||||
except (ironic_exc.InternalServerError, # Validations
|
||||
ironic_exc.BadRequest) as e: # Maintenance
|
||||
LOG.error('Failed to set console mode to "%(mode)s" '
|
||||
'for node %(node)s: %(reason)s',
|
||||
{'mode': mode,
|
||||
'node': node_uuid,
|
||||
'node': node_ident,
|
||||
'reason': e})
|
||||
raise exception.ConsoleNotAvailable()
|
||||
|
||||
@ -609,7 +609,7 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
LOG.error('Timeout while waiting for console mode to be '
|
||||
'set to "%(mode)s" on node %(node)s',
|
||||
{'mode': mode,
|
||||
'node': node_uuid})
|
||||
'node': node_ident})
|
||||
raise exception.ConsoleNotAvailable()
|
||||
|
||||
# Acquire the console
|
||||
@ -637,7 +637,7 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
return {'node': node,
|
||||
'console_info': console['console_info']}
|
||||
else:
|
||||
LOG.debug('Console is disabled for node %s', node_uuid)
|
||||
LOG.debug('Console is disabled for node %s', node_ident)
|
||||
raise exception.ConsoleNotAvailable()
|
||||
|
||||
def get_serial_console(self, context, server, console_type):
|
||||
|
@ -74,7 +74,7 @@ def upgrade():
|
||||
sa.Column('image_uuid', sa.String(length=36), nullable=True),
|
||||
sa.Column('launched_at', sa.DateTime(), nullable=True),
|
||||
sa.Column('availability_zone', sa.String(length=255), nullable=True),
|
||||
sa.Column('node_uuid', sa.String(length=36), nullable=True),
|
||||
sa.Column('node', sa.String(length=255), nullable=True),
|
||||
sa.Column('extra', sa.Text(), nullable=True),
|
||||
sa.Column('locked', sa.Boolean(), nullable=True),
|
||||
sa.Column('affinity_zone', sa.String(length=255), nullable=True),
|
||||
|
@ -127,8 +127,8 @@ class Connection(api.Connection):
|
||||
query = query.filter_by(flavor_uuid=filters['flavor_uuid'])
|
||||
if 'image_uuid' in filters:
|
||||
query = query.filter_by(image_uuid=filters['image_uuid'])
|
||||
if 'node_uuid' in filters:
|
||||
query = query.filter_by(node_uuid=filters['node_uuid'])
|
||||
if 'node' in filters:
|
||||
query = query.filter_by(node=filters['node'])
|
||||
return query
|
||||
|
||||
@oslo_db_api.retry_on_deadlock
|
||||
|
@ -85,7 +85,7 @@ class Server(Base):
|
||||
flavor_uuid = Column(String(36), nullable=True)
|
||||
availability_zone = Column(String(255), nullable=True)
|
||||
image_uuid = Column(String(36), nullable=True)
|
||||
node_uuid = Column(String(36), nullable=True)
|
||||
node = Column(String(255), nullable=True)
|
||||
launched_at = Column(DateTime, nullable=True)
|
||||
extra = Column(db_types.JsonEncodedDict)
|
||||
locked = Column(Boolean)
|
||||
|
@ -101,7 +101,7 @@ class OnFailureRescheduleTask(flow_utils.MoganTask):
|
||||
|
||||
def revert(self, context, result, flow_failures, server, **kwargs):
|
||||
# Clean up associated node uuid
|
||||
server.node_uuid = None
|
||||
server.node = None
|
||||
server.save()
|
||||
|
||||
# Check if we have a cause which can tell us not to reschedule and
|
||||
@ -136,13 +136,13 @@ class BuildNetworkTask(flow_utils.MoganTask):
|
||||
|
||||
# TODO(zhenguo): This seems not needed as our scheduler has already
|
||||
# guaranteed this.
|
||||
ports = self.manager.driver.get_ports_from_node(server.node_uuid)
|
||||
ports = self.manager.driver.get_ports_from_node(server.node)
|
||||
if len(requested_networks) > len(ports):
|
||||
raise exception.InterfacePlugException(_(
|
||||
"Ironic node: %(id)s virtual to physical interface count"
|
||||
" mismatch"
|
||||
" (Vif count: %(vif_count)d, Pif count: %(pif_count)d)")
|
||||
% {'id': server.node_uuid,
|
||||
% {'id': server.node,
|
||||
'vif_count': len(requested_networks),
|
||||
'pif_count': len(ports)})
|
||||
|
||||
@ -166,8 +166,7 @@ class BuildNetworkTask(flow_utils.MoganTask):
|
||||
server_nic = objects.ServerNic(context, **nic_dict)
|
||||
nics_obj.objects.append(server_nic)
|
||||
|
||||
self.manager.driver.plug_vif(server.node_uuid,
|
||||
port['id'])
|
||||
self.manager.driver.plug_vif(server.node, port['id'])
|
||||
# Get updated VIF info
|
||||
port_dict = self.manager.network_api.show_port(
|
||||
context, port.get('id'))
|
||||
@ -267,7 +266,7 @@ class CreateServerTask(flow_utils.MoganTask):
|
||||
configdrive_value = configdrive.get('value')
|
||||
self.driver.spawn(context, server, configdrive_value)
|
||||
LOG.info('Successfully provisioned Ironic node %s',
|
||||
server.node_uuid)
|
||||
server.node)
|
||||
|
||||
def revert(self, context, result, flow_failures, server, **kwargs):
|
||||
LOG.debug("Server %s: destroy backend node", server.uuid)
|
||||
|
@ -101,7 +101,7 @@ class EngineManager(base_manager.BaseEngineManager):
|
||||
for rp in all_rps:
|
||||
if rp['uuid'] not in node_uuids:
|
||||
server_by_node = objects.Server.list(
|
||||
context, filters={'node_uuid': rp['uuid']})
|
||||
context, filters={'node': rp['name']})
|
||||
if server_by_node:
|
||||
continue
|
||||
self.scheduler_client.reportclient.delete_resource_provider(
|
||||
@ -336,7 +336,7 @@ class EngineManager(base_manager.BaseEngineManager):
|
||||
{"nodes": nodes})
|
||||
|
||||
for (server, node) in six.moves.zip(servers, nodes):
|
||||
server.node_uuid = node
|
||||
server.node = node
|
||||
server.save()
|
||||
# Add a retry entry for the selected node
|
||||
retry_nodes = retry['nodes']
|
||||
@ -458,7 +458,7 @@ class EngineManager(base_manager.BaseEngineManager):
|
||||
|
||||
# Issue delete request to driver only if server is associated with
|
||||
# a underlying node.
|
||||
if server.node_uuid:
|
||||
if server.node:
|
||||
do_delete_server(server)
|
||||
|
||||
server.power_state = states.NOSTATE
|
||||
@ -560,7 +560,7 @@ class EngineManager(base_manager.BaseEngineManager):
|
||||
try:
|
||||
vif = self.network_api.bind_port(context, vif_port['id'], server)
|
||||
vif_port = vif['port']
|
||||
self.driver.plug_vif(server.node_uuid, vif_port['id'])
|
||||
self.driver.plug_vif(server.node, vif_port['id'])
|
||||
nics_obj = objects.ServerNics(context)
|
||||
nic_dict = {'port_id': vif_port['id'],
|
||||
'network_id': vif_port['network_id'],
|
||||
|
@ -49,7 +49,7 @@ class Server(base.MoganObject, object_base.VersionedObjectDictCompat):
|
||||
'image_uuid': object_fields.UUIDField(nullable=True),
|
||||
'nics': object_fields.ObjectField('ServerNics', nullable=True),
|
||||
'fault': object_fields.ObjectField('ServerFault', nullable=True),
|
||||
'node_uuid': object_fields.UUIDField(nullable=True),
|
||||
'node': object_fields.StringField(nullable=True),
|
||||
'launched_at': object_fields.DateTimeField(nullable=True),
|
||||
'metadata': object_fields.FlexibleDictField(nullable=True),
|
||||
'locked': object_fields.BooleanField(default=False),
|
||||
|
@ -128,7 +128,7 @@ class FilterScheduler(driver.Scheduler):
|
||||
agg_uuids = [agg.uuid for agg in aggregates]
|
||||
query_filters = {'member_of': 'in:' + ','.join(agg_uuids)}
|
||||
rps = self.reportclient.get_filtered_resource_providers(query_filters)
|
||||
return [rp['uuid'] for rp in rps]
|
||||
return [rp['name'] for rp in rps]
|
||||
|
||||
def _get_filtered_affzs_nodes(self, context, server_group, filtered_nodes,
|
||||
num_servers):
|
||||
@ -260,7 +260,7 @@ class FilterScheduler(driver.Scheduler):
|
||||
query_filters = {'resources': resources_filter}
|
||||
filtered_nodes = self.reportclient.\
|
||||
get_filtered_resource_providers(query_filters)
|
||||
return [node['uuid'] for node in filtered_nodes]
|
||||
return [node['name'] for node in filtered_nodes]
|
||||
|
||||
return list(filtered_nodes)
|
||||
|
||||
|
@ -315,12 +315,12 @@ class BaremetalNodeClient(rest_client.RestClient):
|
||||
body = self.deserialize(body)['nodes']
|
||||
return rest_client.ResponseBodyList(resp, body)
|
||||
|
||||
def show_bm_node(self, node_uuid=None, service_id=None):
|
||||
def show_bm_node(self, node_ident=None, service_id=None):
|
||||
if service_id:
|
||||
uri = '%s/nodes/detail?instance_uuid=%s' % (self.uri_prefix,
|
||||
service_id)
|
||||
else:
|
||||
uri = '%s/nodes/%s' % (self.uri_prefix, node_uuid)
|
||||
uri = '%s/nodes/%s' % (self.uri_prefix, node_ident)
|
||||
resp, body = self.get(uri)
|
||||
self.expected_success(200, resp.status)
|
||||
body = self.deserialize(body)
|
||||
|
@ -138,7 +138,7 @@ class TestServerAuthorization(v1_test.APITestV1):
|
||||
headers = self.gen_headers(self.context, roles="no-admin")
|
||||
resp = self.get_json('/servers/%s' % self.server1.uuid,
|
||||
headers=headers)
|
||||
self.assertNotIn('node_uuid', resp)
|
||||
self.assertNotIn('node', resp)
|
||||
|
||||
def test_server_get_one_by_admin(self):
|
||||
# when the evil tenant is admin, he can do everything.
|
||||
@ -146,7 +146,7 @@ class TestServerAuthorization(v1_test.APITestV1):
|
||||
headers = self.gen_headers(self.context, roles="admin")
|
||||
resp = self.get_json('/servers/%s' % self.server1.uuid,
|
||||
headers=headers)
|
||||
self.assertIn('node_uuid', resp)
|
||||
self.assertIn('node', resp)
|
||||
|
||||
def test_server_get_one_unauthorized(self):
|
||||
# not admin and not the owner
|
||||
|
@ -54,8 +54,7 @@ def get_test_server(**kw):
|
||||
'image_uuid': kw.get('image_uuid',
|
||||
'ac3b2291-b9ef-45f6-8eeb-21ac568a64a5'),
|
||||
'nics': kw.get('nics', fake_server_nics),
|
||||
'node_uuid': kw.get('node_uuid',
|
||||
'f978ef48-d4af-4dad-beec-e6174309bc71'),
|
||||
'node': kw.get('node', 'node-0'),
|
||||
'launched_at': kw.get('launched_at'),
|
||||
'extra': kw.get('extra', {}),
|
||||
'updated_at': kw.get('updated_at'),
|
||||
|
@ -95,7 +95,7 @@ class ManageServerTestCase(mgr_utils.ServiceSetUpMixin,
|
||||
fake_node = mock.MagicMock()
|
||||
fake_node.provision_state = ironic_states.ACTIVE
|
||||
server = obj_utils.create_test_server(
|
||||
self.context, status=states.DELETING, node_uuid=None)
|
||||
self.context, status=states.DELETING, node=None)
|
||||
self._start_service()
|
||||
|
||||
self.service.delete_server(self.context, server)
|
||||
@ -153,7 +153,7 @@ class ManageServerTestCase(mgr_utils.ServiceSetUpMixin,
|
||||
fake_node = mock.MagicMock()
|
||||
fake_node.provision_state = ironic_states.ACTIVE
|
||||
server = obj_utils.create_test_server(
|
||||
self.context, status=states.ACTIVE, node_uuid=None)
|
||||
self.context, status=states.ACTIVE, node=None)
|
||||
port_id = server['nics'][0]['port_id']
|
||||
self._start_service()
|
||||
self.service.detach_interface(self.context, server, port_id)
|
||||
|
@ -382,7 +382,7 @@ class _TestObject(object):
|
||||
# version bump. It is md5 hash of object fields and remotable methods.
|
||||
# The fingerprint values should only be changed if there is a version bump.
|
||||
expected_object_fingerprints = {
|
||||
'Server': '1.0-10767895d6aa0397ef2391d8ca2f8327',
|
||||
'Server': '1.0-75e6db8272082cd1adf3b74a28ecc9c1',
|
||||
'ServerFault': '1.0-74349ff701259e4834b4e9dc2dac1b12',
|
||||
'ServerFaultList': '1.0-43e8aad0258652921f929934e9e048fd',
|
||||
'Flavor': '1.0-9f7166aa387d89ec40cd699019d0c9a9',
|
||||
|
Loading…
Reference in New Issue
Block a user