Manage existing BMs: Part-2
Add the adopt baremetal node API.

Change-Id: I1897129930bface6a6c4a179e02d6107ff3811af
Implements: bp manage-existing-bms
parent 93da16d88f
commit 26508fd131
@@ -4,7 +4,7 @@
 Manageable Servers
 ===================

-Lists manageable servers.
+Lists and manages manageable servers.

 List manageable servers information
 ===================================
@@ -38,3 +38,63 @@ Response

.. literalinclude:: samples/manageable_servers/manageable-servers-list-resp.json
   :language: javascript

Manage an existing server
=========================

.. rest_method:: POST /manageable_servers

Manage a server.

Manages bare metal nodes in the active state that were migrated by operators.

Normal response codes: 201

Error response codes: badRequest(400), unauthorized(401), forbidden(403),
conflict(409)

Request
-------

.. rest_parameters:: parameters.yaml

   - name: server_name
   - description: server_description
   - node_uuid: manageable_servers_uuid
   - metadata: metadata

**Example Manage Server: JSON request**

.. literalinclude:: samples/servers/manageable-server-create-req.json
   :language: javascript

Response
--------

.. rest_parameters:: parameters.yaml

   - name: server_name
   - description: server_description
   - flavor_uuid: flavorRef
   - image_uuid: imageRef
   - availability_zone: availability_zone
   - addresses: addresses
   - links: links
   - uuid: server_uuid
   - status: server_status
   - power_state: server_power_state
   - project_id: project_id_body
   - user_id: user_id_body
   - updated_at: updated_at
   - created_at: created_at
   - launched_at: launched_at
   - metadata: metadata
   - affinity_zone: affinity_zone
   - key_name: key_name
   - node_uuid: manageable_servers_uuid
   - partitions: partitions

**Example Manage Server: JSON response**

.. literalinclude:: samples/servers/manageable-server-manage-resp.json
   :language: javascript
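For orientation, the sketch below shows how a client could exercise the new call once deployed. It is illustrative only: the endpoint path and body follow the documentation above, while the base URL and token are placeholder values that would normally come from the service catalog and Keystone.

import requests

# Placeholder endpoint and token; in a real deployment these come from the
# service catalog and a Keystone authentication request.
MOGAN_URL = 'http://127.0.0.1:6688/v1'
TOKEN = 'replace-with-a-keystone-token'

body = {
    "name": "test_manageable_server",
    "description": "This is a manageable server",
    "node_uuid": "aacdbd78-d670-409e-95aa-ecfcfb94fee2",
    "metadata": {"My Server Name": "Apache1"},
}

# POST /manageable_servers adopts the node and returns the new server (201).
resp = requests.post(MOGAN_URL + '/manageable_servers',
                     json=body,
                     headers={'X-Auth-Token': TOKEN})
resp.raise_for_status()
print(resp.json()['uuid'])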
@@ -0,0 +1,8 @@
{
    "name": "test_manageable_server",
    "description": "This is a manageable server",
    "metadata" : {
        "My Server Name" : "Apache1"
    },
    "node_uuid": "aacdbd78-d670-409e-95aa-ecfcfb94fee2"
}
@@ -0,0 +1,33 @@
{
    "name": "test_manageable_server",
    "description": "This is a manageable server",
    "flavor_uuid": null,
    "image_uuid": "efe0a06f-ca95-4808-b41e-9f55b9c5eb98",
    "availability_zone" : null,
    "status": "active",
    "power_state": "on",
    "links": [
        {
            "href": "http://10.3.150.17:6688/v1/servers/7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
            "rel": "self"
        },
        {
            "href": "http://10.3.150.17:6688/servers/7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
            "rel": "bookmark"
        }
    ],
    "uuid": "7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
    "created_at": "2016-09-27T02:37:21.966342+00:00",
    "launched_at": "2016-09-27T02:39:21.966342+00:00",
    "updated_at": null,
    "affinity_zone": null,
    "project_id": "2f15c3524826465a9afbd150478b3b76",
    "user_id": "a6205fcab03d4a289251f420456b1289",
    "addresses": [],
    "metadata": {
        "My Server Name" : "Apache1"
    },
    "key_name": null,
    "node_uuid": "aacdbd78-d670-409e-95aa-ecfcfb94fee2",
    "partitions": null
}
@@ -18,9 +18,14 @@ from pecan import rest
from wsme import types as wtypes

from mogan.api.controllers import base
from mogan.api.controllers import link
from mogan.api.controllers.v1.schemas import manageable_servers as schema
from mogan.api.controllers.v1.servers import Server
from mogan.api.controllers.v1 import types
from mogan.api import expose
from mogan.api import validation
from mogan.common import policy
from six.moves import http_client


class ManageableServer(base.APIBase):
@@ -84,3 +89,26 @@ class ManageableServersController(rest.RestController):
        nodes = pecan.request.engine_api.get_manageable_servers(
            pecan.request.context)
        return ManageableServerCollection.convert_with_list_of_dicts(nodes)

    @policy.authorize_wsgi("mogan:manageable_servers", "create", False)
    @expose.expose(Server, body=types.jsontype,
                   status_code=http_client.CREATED)
    def post(self, server):
        """Manage an existing bare metal node.

        :param server: A manageable server within the request body
        :return: The server information.
        """
        validation.check_schema(server, schema.manage_server)

        manageable_server = pecan.request.engine_api.manage(
            pecan.request.context,
            server.get('node_uuid'),
            server.get('name'),
            server.get('description'),
            server.get('metadata'))

        # Set the HTTP Location header for the managed server.
        pecan.response.location = link.build_url('server',
                                                 manageable_server.uuid)
        return Server.convert_with_links(manageable_server)
mogan/api/controllers/v1/schemas/manageable_servers.py (new file, 30 lines)
@@ -0,0 +1,30 @@
# Copyright 2017 Fiberhome Integration Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from mogan.api.validation import parameter_types


manage_server = {
    "type": "object",
    "properties": {
        'name': parameter_types.name,
        'description': parameter_types.description,
        'node_uuid': parameter_types.node_uuid,
        'metadata': parameter_types.metadata
    },
    'required': ['name', 'node_uuid'],
    'additionalProperties': False,
}
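As an aside, the request body validated by this schema behaves like any other JSON Schema document. The sketch below is illustrative only and inlines simplified stand-ins for the parameter_types entries; those inlined shapes are assumptions, not the real Mogan definitions.

import jsonschema

# Simplified stand-ins for mogan.api.validation.parameter_types (assumed shapes).
manage_server = {
    "type": "object",
    "properties": {
        "name": {"type": "string", "minLength": 1, "maxLength": 255},
        "description": {"type": "string", "maxLength": 255},
        "node_uuid": {"type": "string", "format": "uuid"},
        "metadata": {"type": "object"},
    },
    "required": ["name", "node_uuid"],
    "additionalProperties": False,
}

body = {
    "name": "test_manageable_server",
    "node_uuid": "aacdbd78-d670-409e-95aa-ecfcfb94fee2",
}
jsonschema.validate(body, manage_server)  # passes

# Omitting the required node_uuid raises ValidationError.
try:
    jsonschema.validate({"name": "no-node"}, manage_server)
except jsonschema.ValidationError as err:
    print(err.message)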
@@ -68,6 +68,10 @@ server_group_id = {
    'type': 'string', 'format': 'uuid'
}

node_uuid = {
    'type': 'string', 'format': 'uuid'
}

metadata = {
    'type': 'object',
    'patternProperties': {
@@ -146,6 +146,30 @@ class BaseEngineDriver(object):
        """
        raise NotImplementedError()

    def get_manageable_node(self, node_uuid):
        """Get the manageable node information by uuid.

        :param node_uuid: The manageable node uuid.
        :return: A dict of manageable node information.
        """
        raise NotImplementedError()

    def manage(self, server, node_uuid):
        """Manage an existing bare metal node.

        :param server: The bare metal server object.
        :param node_uuid: The manageable bare metal node uuid.
        """
        raise NotImplementedError()

    def unmanage(self, server, node_uuid):
        """Unmanage a bare metal node.

        :param server: The bare metal server object.
        :param node_uuid: The manageable bare metal node uuid.
        """
        raise NotImplementedError()


def load_engine_driver(engine_driver):
    """Load an engine driver module.
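To make the new driver contract concrete, here is a small, self-contained sketch of an in-memory driver that mirrors the three hooks above. The FakeDriver class and its association store are invented for illustration; a real driver (such as the Ironic one below) talks to its management service instead.

class FakeDriver(object):
    """Illustrative stand-in that mirrors the BaseEngineDriver hooks."""

    def __init__(self):
        # node_uuid -> server_uuid, standing in for a real management service.
        self._associations = {}

    def get_manageable_node(self, node_uuid):
        # A real driver must verify the node is active and not yet associated.
        return {'uuid': node_uuid, 'power_state': 'power on',
                'ports': [], 'portgroups': [], 'image_source': None}

    def manage(self, server, node_uuid):
        # Record the association between the node and the server object.
        self._associations[node_uuid] = server.uuid

    def unmanage(self, server, node_uuid):
        # Drop the association again, e.g. when managing has to be rolled back.
        self._associations.pop(node_uuid, None)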
@@ -754,3 +754,80 @@ class IronicDriver(base_driver.BaseEngineDriver):
                 'portgroups': node.get('portgroups'),
                 'image_source': node.get('image_source')})
        return manageable_nodes

    def get_manageable_node(self, node_uuid):
        try:
            node = self.ironicclient.call('node.get', node_uuid)
        except ironic_exc.NotFound:
            raise exception.NodeNotFound(node=node_uuid)

        if (node.instance_uuid is not None or
                node.provision_state != ironic_states.ACTIVE or
                node.resource_class is None):
            LOG.error("The node's instance uuid is %(instance_uuid)s, "
                      "node's provision state is %(provision_state)s, "
                      "node's resource class is %(resource_class)s",
                      {"instance_uuid": node.instance_uuid,
                       "provision_state": node.provision_state,
                       "resource_class": node.resource_class})
            raise exception.NodeNotAllowedManaged(node_uuid=node_uuid)

        # Retrieve ports and portgroups
        params = {
            'limit': 0,
            'fields': ('uuid', 'node_uuid', 'extra', 'address',
                       'internal_info')
        }

        port_list = self.ironicclient.call("port.list", **params)
        portgroup_list = self.ironicclient.call("portgroup.list", **params)

        # Add ports to the associated node
        node.ports = [self._port_or_group_resource(port)
                      for port in port_list
                      if node.uuid == port.node_uuid]
        # Add portgroups to the associated node
        node.portgroups = [self._port_or_group_resource(portgroup)
                           for portgroup in portgroup_list
                           if node.uuid == portgroup.node_uuid]
        node.power_state = map_power_state(node.power_state)
        manageable_node = self._node_resource(node)
        manageable_node['uuid'] = node_uuid

        return manageable_node

    def manage(self, server, node_uuid):
        """Manage an existing bare metal node.

        :param server: The bare metal server object.
        :param node_uuid: The manageable bare metal node uuid.
        """
        # Associate the node with a server
        patch = [{'path': '/instance_uuid', 'op': 'add', 'value': server.uuid}]

        try:
            self.ironicclient.call('node.update', node_uuid, patch,
                                   retry_on_conflict=False)
        except ironic_exc.BadRequest:
            msg = (_("Failed to update parameters on node %(node)s "
                     "when provisioning the server %(server)s")
                   % {'node': node_uuid, 'server': server.uuid})
            LOG.error(msg)
            raise exception.ServerDeployFailure(msg)

    def unmanage(self, server, node_uuid):
        """Unmanage a bare metal node.

        :param server: The bare metal server object.
        :param node_uuid: The manageable bare metal node uuid.
        """
        patch = [{'path': '/instance_uuid', 'op': 'remove'}]

        try:
            self.ironicclient.call('node.update', node_uuid, patch)
        except ironic_exc.BadRequest as e:
            LOG.warning("Failed to remove deploy parameters from node "
                        "%(node)s when unprovisioning the server "
                        "%(server)s: %(reason)s",
                        {'node': node_uuid, 'server': server.uuid,
                         'reason': six.text_type(e)})
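The two node.update calls above are symmetric JSON-patch operations on the Ironic node's instance_uuid field. For reference, the payloads look like the standalone sketch below; the server UUID is just an example value taken from the API samples earlier.

# Standalone view of the JSON-patch payloads used by manage()/unmanage();
# the server UUID is an example value from the API samples above.
server_uuid = '7de2859d-ec6d-42c7-bb86-9d630ba5ac94'

# manage(): associate the Ironic node with the Mogan server.
associate_patch = [
    {'path': '/instance_uuid', 'op': 'add', 'value': server_uuid},
]

# unmanage(): remove the association again if adoption has to be undone.
dissociate_patch = [
    {'path': '/instance_uuid', 'op': 'remove'},
]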
@@ -499,4 +499,9 @@ class ServerGroupExists(Conflict):
class GetManageableServersFailed(MoganException):
    _msg_fmt = _("Failed to get manageable servers from driver: %(reason)s")


class NodeNotAllowedManaged(Forbidden):
    _msg_fmt = _("The bare metal node %(node_uuid)s is not allowed to "
                 "be managed")

ObjectActionError = obj_exc.ObjectActionError
@@ -186,6 +186,9 @@ server_policies = [
    policy.RuleDefault('mogan:manageable_servers:get_all',
                       'rule:admin_api',
                       description='Get manageable nodes from driver'),
    policy.RuleDefault('mogan:manageable_servers:create',
                       'rule:admin_api',
                       description='Manage an existing baremetal server')
]
@@ -656,3 +656,41 @@ class API(object):
        except Exception as e:
            raise exception.GetManageableServersFailed(reason=e)
        return mservers

    def manage(self, context, node_uuid, name, description, metadata):
        """Create a new server by managing an existing bare metal node.

        Sends the manageable server information to the engine, which will
        handle creating the DB entries.

        Returns a server object.
        """
        self._check_num_servers_quota(context, 1, 1)

        # Create the servers reservations
        reserve_opts = {'servers': 1}
        reservations = self.quota.reserve(context, **reserve_opts)
        if reservations:
            self.quota.commit(context, reservations)

        # TODO(litao) we will support to specify user and project in
        # managing bare metal node later.
        base_options = {
            'image_uuid': None,
            'status': None,
            'user_id': context.user,
            'project_id': context.tenant,
            'power_state': states.NOSTATE,
            'name': name,
            'description': description,
            'locked': False,
            'metadata': metadata or {},
            'availability_zone': None}

        server = objects.Server(context=context)
        server.update(base_options)
        server.uuid = uuidutils.generate_uuid()

        server = self.engine_rpcapi.manage_server(context, server, node_uuid)

        return server
@@ -324,7 +324,6 @@ class EngineManager(base_manager.BaseEngineManager):
        filter_properties['retry'] = retry
        request_spec['num_servers'] = len(servers)
        request_spec['server_ids'] = [s.uuid for s in servers]

        try:
            nodes = self.scheduler_client.select_destinations(
                context, request_spec, filter_properties)
@@ -673,3 +672,90 @@ class EngineManager(base_manager.BaseEngineManager):

    def get_manageable_servers(self, context):
        return self.driver.get_manageable_nodes()

    def _manage_server(self, context, server, node):
        # Create the resource provider
        resource_class = sched_utils.ensure_resource_class_name(
            node['resource_class'])
        inventory = self.driver.get_node_inventory(node)
        inventory_data = {resource_class: inventory}
        # TODO(liusheng) need to ensure the inventory is rolled back if
        # putting allocations failed.
        self.scheduler_client.set_inventory_for_provider(
            node['uuid'], node['name'] or node['uuid'], inventory_data,
            resource_class)
        # Allocate the resource
        self.scheduler_client.reportclient.put_allocations(
            node['uuid'], server.uuid, {resource_class: 1},
            server.project_id, server.user_id)

        LOG.info("Starting to manage bare metal node %(node_uuid)s for "
                 "server %(uuid)s",
                 {"node_uuid": node['uuid'], "uuid": server.uuid})

        nics_obj = objects.ServerNics(context)
        # Check networks
        all_ports = node['ports'] + node['portgroups']
        for vif in all_ports:
            neutron_port_id = vif['neutron_port_id']
            if neutron_port_id is not None:
                port_dict = self.network_api.show_port(
                    context, neutron_port_id)

                nic_dict = {'port_id': port_dict['id'],
                            'network_id': port_dict['network_id'],
                            'mac_address': port_dict['mac_address'],
                            'fixed_ips': port_dict['fixed_ips'],
                            'server_uuid': server.uuid}

                # Check if the neutron port's mac address matches the port
                # address of bare metal nics.
                if nic_dict['mac_address'] != vif['address']:
                    msg = (
                        _("The address of neutron port %(port_id)s is "
                          "%(address)s, but the nic address of bare metal "
                          "node %(node_uuid)s is %(nic_address)s.") %
                        {"port_id": nic_dict['port_id'],
                         "address": nic_dict['mac_address'],
                         "node_uuid": node['uuid'],
                         "nic_address": vif['address']})
                    raise exception.NetworkError(msg)

                self.network_api.bind_port(context, neutron_port_id, server)
                server_nic = objects.ServerNic(context, **nic_dict)
                nics_obj.objects.append(server_nic)

        # Manage the bare metal node
        self.driver.manage(server, node['uuid'])

        image_uuid = node.get('image_source')
        if not uuidutils.is_uuid_like(image_uuid):
            image_uuid = None

        # Set the server information
        server.image_uuid = image_uuid
        server.node_uuid = node['uuid']
        server.nics = nics_obj
        server.power_state = node['power_state']
        server.launched_at = timeutils.utcnow()
        server.status = states.ACTIVE
        if server.power_state == states.POWER_OFF:
            server.status = states.STOPPED

    def manage_server(self, context, server, node_uuid):
        try:
            node = self.driver.get_manageable_node(node_uuid)
            self._manage_server(context, server, node)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._rollback_servers_quota(context, -1)
        # Save the server information
        try:
            server.create()
        except Exception:
            with excutils.save_and_reraise_exception():
                self._rollback_servers_quota(context, -1)
                self.driver.unmanage(server, node['uuid'])

        LOG.info("Managed server %s successfully.", server.uuid)
        return server
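The error handling in manage_server() above is easy to misread, so here is a condensed, self-contained sketch of just the rollback ordering; the function names are simplified stand-ins, not Mogan APIs. Quota is released on any failure, while the driver-side association is only undone when the failure happens after manage() already succeeded.

def manage_server_flow(get_node, manage, create_record, unmanage,
                       rollback_quota):
    """Condensed illustration of the rollback ordering; not Mogan code."""
    try:
        node = get_node()
        manage(node)            # placement bookkeeping + driver.manage()
    except Exception:
        rollback_quota()        # nothing to unmanage yet
        raise
    try:
        create_record()         # the server.create() DB step
    except Exception:
        rollback_quota()
        unmanage(node)          # undo the driver-side association
        raise
    return node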
@@ -127,3 +127,8 @@ class EngineAPI(object):
    def get_manageable_servers(self, context):
        cctxt = self.client.prepare(topic=self.topic, server=CONF.host)
        return cctxt.call(context, 'get_manageable_servers')

    def manage_server(self, context, server, node_uuid):
        cctxt = self.client.prepare(topic=self.topic, server=CONF.host)
        return cctxt.call(context, 'manage_server',
                          server=server, node_uuid=node_uuid)
@@ -17,6 +17,16 @@ import mock
from oslo_utils import uuidutils

from mogan.tests.functional.api import v1 as v1_test
from mogan.tests.unit.db import utils


def gen_post_body(**kw):
    return {
        "name": kw.get("name", "test_manageable_server"),
        "description": kw.get("description", "This is a manageable server"),
        "node_uuid": "aacdbd78-d670-409e-95aa-ecfcfb94fee2",
        "metadata": {"My Server Name": "Apache1"}
    }


class TestManageableServers(v1_test.APITestV1):
@@ -28,6 +38,8 @@ class TestManageableServers(v1_test.APITestV1):
        self.project_id = "0abcdef1-2345-6789-abcd-ef123456abc1"
        # evil_project is a wicked tenant, used for the unauthorized tests.
        self.evil_project = "0abcdef1-2345-6789-abcd-ef123456abc9"
        self.server1 = utils.create_test_server(
            name="T1", project_id=self.project_id)

    def test_server_get_manageable_servers_with_invalid_rule(self):
        self.context.tenant = self.evil_project
@@ -46,3 +58,25 @@
        headers = self.gen_headers(self.context, roles="admin")
        resp = self.get_json('/manageable_servers', headers=headers)
        self.assertIn("uuid", resp['manageable_servers'][0])

    @mock.patch('mogan.engine.api.API.manage')
    def test_manage_server_with_invalid_rule(self, mock_engine_manage):
        self.context.tenant = self.evil_project
        headers = self.gen_headers(self.context, roles="no-admin")
        body = gen_post_body()
        resp = self.post_json('/manageable_servers', body, expect_errors=True,
                              headers=headers)
        error = self.parser_error_body(resp)
        self.assertEqual(self.DENY_MESSAGE % 'manageable_servers:create',
                         error['faultstring'])

    @mock.patch('mogan.engine.api.API.manage')
    def test_manage_server(self, mock_engine_manage):
        mock_engine_manage.side_effect = None
        mock_engine_manage.return_value = self.server1
        body = gen_post_body()
        # We cannot prevent the evil tenant here; the quota will limit them.
        self.context.tenant = self.project_id
        headers = self.gen_headers(self.context, roles="admin")
        self.post_json('/manageable_servers', body, headers=headers,
                       status=201)
@@ -396,3 +396,26 @@ class ComputeAPIUnitTest(base.DbTestCase):
                          self.context,
                          self.user_id,
                          'test_keypair')

    @mock.patch.object(engine_rpcapi.EngineAPI, 'manage_server')
    @mock.patch.object(engine_api.API, '_check_num_servers_quota')
    def test_manage(self, check_quota_mock, mock_manage_server):
        node_uuid = 'aacdbd78-d670-409e-95aa-ecfcfb94fee2'
        mock_manage_server.return_value = mock.MagicMock()

        res = self.dbapi._get_quota_usages(self.context, self.project_id)
        before_in_use = 0
        if res.get('servers') is not None:
            before_in_use = res.get('servers').in_use

        self.engine_api.manage(self.context,
                               node_uuid=node_uuid,
                               name='fake-name',
                               description='fake-description',
                               metadata={'k1': 'v1'})

        check_quota_mock.assert_called_once_with(self.context, 1, 1)
        self.assertTrue(mock_manage_server.called)
        res = self.dbapi._get_quota_usages(self.context, self.project_id)
        after_in_use = res.get('servers').in_use
        self.assertEqual(before_in_use + 1, after_in_use)
@@ -29,6 +29,8 @@ from mogan.engine import manager
from mogan.network import api as network_api
from mogan.notifications import base as notifications
from mogan.objects import fields
from mogan.objects import server
from mogan.scheduler.client.report import SchedulerReportClient as report_api
from mogan.tests.unit.db import base as tests_db_base
from mogan.tests.unit.engine import mgr_utils
from mogan.tests.unit.objects import utils as obj_utils
@@ -270,3 +272,219 @@ class ManageServerTestCase(mgr_utils.ServiceSetUpMixin,
        self.service.get_manageable_servers(self.context)
        self._stop_service()
        get_manageable_mock.assert_called_once()

    @mock.patch.object(network_api.API, 'bind_port')
    @mock.patch.object(IronicDriver, 'manage')
    @mock.patch.object(network_api.API, 'show_port')
    @mock.patch.object(report_api, 'put_allocations')
    def test__manage_servers(self,
                             put_allocations_mock, show_port_mock,
                             manage_mock, bind_port_mock):
        neutron_port_id = '67ec8e86-d77b-4729-b11d-a009864d289d'
        neutron_mac_address = '52:54:00:8e:6a:03'
        node_uuid = 'aacdbd78-d670-409e-95aa-ecfcfb94fee2'
        image_uuid = 'efe0a06f-ca95-4808-b41e-9f55b9c5eb98'

        node = {
            'uuid': node_uuid,
            'name': 'test_manageable_mode',
            'resource_class': 'gold',
            'power_state': 'power on',
            'provision_state': 'active',
            "ports": [
                {
                    "address": neutron_mac_address,
                    "uuid": "1ec01153-685a-49b5-a6d3-45a4e7dddf53",
                    "neutron_port_id": neutron_port_id
                }
            ],
            "portgroups": [
                {
                    "address": "a4:dc:be:0e:82:a6",
                    "uuid": "1ec01153-685a-49b5-a6d3-45a4e7dddf54",
                    "neutron_port_id": None
                }
            ],
            'image_source': image_uuid
        }

        put_allocations_mock.side_effect = None
        show_port_mock.return_value = {
            'id': neutron_port_id,
            'network_id': '34ec8e86-d77b-4729-b11d-a009864d3456',
            'mac_address': neutron_mac_address,
            'fixed_ips': [{"subnet_id": "d2d7a7c2-17d2-4268-906d-1da8dde24fa8",
                           "ip_address": "10.80.20.12"}]
        }

        bind_port_mock.side_effect = None
        server = obj_utils.get_test_server(
            self.context, status=None, node_uuid=None,
            power_state=states.NOSTATE, availability_zone=None,
            image_uuid=None)

        manage_mock.side_effect = None
        self.service._manage_server(self.context, server, node)

        put_allocations_mock.assert_called_once()
        manage_mock.assert_called_once()
        show_port_mock.assert_called_once_with(self.context, neutron_port_id)
        bind_port_mock.assert_called_once_with(self.context, neutron_port_id,
                                               server)
        self.assertEqual(server.node_uuid, node_uuid)
        self.assertIsNone(server.availability_zone)
        self.assertEqual(server.status, 'active')
        self.assertEqual(server.power_state, 'power on')
        self.assertEqual(server.image_uuid, image_uuid)

    @mock.patch.object(network_api.API, 'bind_port')
    @mock.patch.object(IronicDriver, 'manage')
    @mock.patch.object(network_api.API, 'show_port')
    @mock.patch.object(report_api, 'put_allocations')
    def test__manage_servers_with_mac_exception(self,
                                                put_allocations_mock,
                                                show_port_mock,
                                                manage_mock, bind_port_mock):
        neutron_port_id1 = '67ec8e86-d77b-4729-b11d-a009864d289d'
        neutron_port_id2 = '67ec8e86-d77b-4729-b11d-a009864d289d'
        neutron_mac_address1 = '52:54:00:8e:6a:03'
        neutron_mac_address2 = '52:54:00:8e:6a:04'
        node_uuid = 'aacdbd78-d670-409e-95aa-ecfcfb94fee2'

        node = {
            'uuid': node_uuid,
            'name': 'test_manageable_mode',
            'resource_class': 'gold',
            'power_state': 'power on',
            'provision_state': 'active',
            "ports": [
                {
                    "address": neutron_mac_address1,
                    "uuid": "1ec01153-685a-49b5-a6d3-45a4e7dddf53",
                    "neutron_port_id": neutron_port_id1
                }
            ],
            "portgroups": [
                {
                    "address": "a4:dc:be:0e:82:a6",
                    "uuid": "1ec01153-685a-49b5-a6d3-45a4e7dddf54",
                    "neutron_port_id": neutron_port_id2
                }
            ],
            'image_source': 'efe0a06f-ca95-4808-b41e-9f55b9c5eb98'
        }

        put_allocations_mock.side_effect = None
        show_port_mock.return_value = {
            'id': neutron_port_id1,
            'network_id': '34ec8e86-d77b-4729-b11d-a009864d3456',
            'mac_address': neutron_mac_address2,
            'fixed_ips': [{"subnet_id": "d2d7a7c2-17d2-4268-906d-1da8dde24fa8",
                           "ip_address": "10.80.20.12"}]
        }

        server = obj_utils.get_test_server(
            self.context, status=None, node_uuid=None,
            power_state=states.NOSTATE, availability_zone=None,
            image_uuid=None)

        manage_mock.side_effect = None
        self.assertRaises(exception.NetworkError, self.service._manage_server,
                          self.context, server, node)

        put_allocations_mock.assert_called_once()
        show_port_mock.assert_called_with(self.context, neutron_port_id1)
        show_port_mock.assert_called_with(self.context, neutron_port_id2)
        manage_mock.assert_not_called()
        bind_port_mock.assert_not_called()
        self.assertNotEqual(server.node_uuid, node_uuid)
        self.assertIsNone(server.availability_zone)
        self.assertIsNone(server.status)
        self.assertEqual(server.power_state, states.NOSTATE)
        self.assertIsNone(server.image_uuid)

    @mock.patch.object(server.Server, 'create')
    @mock.patch.object(IronicDriver, 'unmanage')
    @mock.patch.object(manager.EngineManager, '_manage_server')
    @mock.patch.object(IronicDriver, 'get_manageable_node')
    def test_manage_servers(self, get_manageable_mock,
                            manage_mock, umanage_mock, server_create_mock):
        get_manageable_mock.side_effect = None
        manage_mock.side_effect = None
        server_create_mock.side_effect = None

        server = obj_utils.get_test_server(
            self.context, status=None, node_uuid=None,
            power_state=states.NOSTATE, availability_zone=None,
            image_uuid=None)
        node_uuid = 'aacdbd78-d670-409e-95aa-ecfcfb94fee2'

        self.service.manage_server(self.context, server, node_uuid)

        get_manageable_mock.assert_called_once_with(node_uuid)
        manage_mock.assert_called_once()
        umanage_mock.assert_not_called()
        server_create_mock.assert_called_once()

    @mock.patch.object(manager.EngineManager, '_rollback_servers_quota')
    @mock.patch.object(server.Server, 'create')
    @mock.patch.object(IronicDriver, 'unmanage')
    @mock.patch.object(manager.EngineManager, '_manage_server')
    @mock.patch.object(IronicDriver, 'get_manageable_node')
    def test_manage_servers_with_db_exception(self,
                                              get_manageable_mock,
                                              manage_mock,
                                              umanage_mock,
                                              server_create_mock,
                                              rollback_quota_mock):
        get_manageable_mock.side_effect = None
        manage_mock.side_effect = None
        server_create_mock.side_effect = exception.ServerAlreadyExists(
            "test-server")

        server = obj_utils.get_test_server(
            self.context, status=None, node_uuid=None,
            power_state=states.NOSTATE, availability_zone=None,
            image_uuid=None)
        node_uuid = 'aacdbd78-d670-409e-95aa-ecfcfb94fee2'

        self.assertRaises(exception.ServerAlreadyExists,
                          self.service.manage_server,
                          self.context, server, node_uuid)

        get_manageable_mock.assert_called_once_with(node_uuid)
        manage_mock.assert_called_once()
        umanage_mock.assert_called_once()
        server_create_mock.assert_called_once()
        rollback_quota_mock.assert_called_once_with(self.context, -1)

    @mock.patch.object(manager.EngineManager, '_rollback_servers_quota')
    @mock.patch.object(server.Server, 'create')
    @mock.patch.object(IronicDriver, 'unmanage')
    @mock.patch.object(manager.EngineManager, '_manage_server')
    @mock.patch.object(IronicDriver, 'get_manageable_node')
    def test_manage_servers_with_network_exception(self,
                                                   get_manageable_mock,
                                                   manage_mock,
                                                   umanage_mock,
                                                   server_create_mock,
                                                   rollback_quota_mock):
        get_manageable_mock.side_effect = None
        manage_mock.side_effect = exception.NetworkError()
        server_create_mock.side_effect = None

        server = obj_utils.get_test_server(
            self.context, status=None, node_uuid=None,
            power_state=states.NOSTATE, availability_zone=None,
            image_uuid=None)
        node_uuid = 'aacdbd78-d670-409e-95aa-ecfcfb94fee2'

        self.assertRaises(exception.NetworkError,
                          self.service.manage_server,
                          self.context, server, node_uuid)

        get_manageable_mock.assert_called_once_with(node_uuid)
        manage_mock.assert_called_once()
        umanage_mock.assert_not_called()
        server_create_mock.assert_not_called()
        rollback_quota_mock.assert_called_once_with(self.context, -1)