Merge "Support multiple rack controllers"
commit 756a063c30
(binary image file changed, 21 KiB before and after; not shown)
@@ -29,6 +29,8 @@ import drydock_provisioner.objects as objects

from drydock_provisioner.control.util import get_internal_api_href
from drydock_provisioner.orchestrator.actions.orchestrator import BaseAction
from drydock_provisioner.drivers.node.maasdriver.errors import RackControllerConflict
from drydock_provisioner.drivers.node.maasdriver.errors import ApiNotAvailable

import drydock_provisioner.drivers.node.maasdriver.models.fabric as maas_fabric
import drydock_provisioner.drivers.node.maasdriver.models.vlan as maas_vlan
@@ -138,25 +140,28 @@ class ValidateNodeServices(BaseMaasAction):
ctx_type='NA')
self.task.failure()
else:
healthy_rackd = []
for r in rack_ctlrs:
rack_svc = r.get_services()
rack_name = r.hostname
if r.is_healthy():
healthy_rackd.append(r.hostname)
else:
msg = "Rack controller %s not healthy." % r.hostname
self.logger.info(msg)
self.task.add_status_msg(
msg=msg,
error=True,
ctx=r.hostname,
ctx_type='rack_ctlr')
if not healthy_rackd:
msg = "No healthy rack controllers found."
self.logger.info(msg)
self.task.add_status_msg(
msg=msg,
error=True,
ctx='maas',
ctx_type='cluster')
self.task.failure()

for s in rack_svc:
if s in maas_rack.RackController.REQUIRED_SERVICES:
is_error = False
if rack_svc[s] not in ("running", "off"):
self.task.failure()
is_error = True
self.logger.info(
"Service %s on rackd %s is %s" %
(s, rack_name, rack_svc[s]))
self.task.add_status_msg(
msg="Service %s on rackd %s is %s" %
(s, rack_name, rack_svc[s]),
error=is_error,
ctx=rack_name,
ctx_type='rack_ctlr')
except errors.TransientDriverError as ex:
self.task.add_status_msg(
msg=str(ex), error=True, ctx='NA', ctx_type='NA', retry=True)
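Read as a unit, the ValidateNodeServices changes above gate the task on rack controller health before individual services are examined. A minimal, self-contained sketch of that gate, with invented hostnames and a stand-in class in place of the real maas_rack.RackController objects (only the is_healthy() predicate from this commit is assumed):

    # Stand-in for a MAAS rack controller exposing the new is_healthy() predicate.
    class FakeRackController:
        def __init__(self, hostname, healthy):
            self.hostname = hostname
            self._healthy = healthy

        def is_healthy(self):
            return self._healthy

    rack_ctlrs = [FakeRackController("rackd01", True),
                  FakeRackController("rackd02", False)]

    # Same gate as ValidateNodeServices: keep only healthy controllers and
    # treat an empty result as a task failure.
    healthy_rackd = [r.hostname for r in rack_ctlrs if r.is_healthy()]
    if not healthy_rackd:
        raise RuntimeError("No healthy rack controllers found.")
    print(healthy_rackd)  # ['rackd01']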
@@ -278,8 +283,7 @@ class DestroyNode(BaseMaasAction):
site_design)
for n in nodes:
try:
machine = machine_list.identify_baremetal_node(
n, update_name=False)
machine = find_node_in_maas(self.maas_client, n)

if machine is None:
msg = "Could not locate machine for node {}".format(n.name)
@@ -288,6 +292,13 @@ class DestroyNode(BaseMaasAction):
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.success(focus=n.get_id())
continue
elif type(machine) == maas_rack.RackController:
msg = "Cannot delete rack controller {}.".format(n.name)
self.logger.info(msg)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.failure(focus=n.get_id())
continue

# First release the node and erase its disks, if MaaS API allows
if machine.status_name in self.actionable_node_statuses:
@@ -687,7 +698,7 @@ class CreateNetworkTemplate(BaseMaasAction):
vlan_list.refresh()
vlan = vlan_list.select(subnet.vlan)

if dhcp_on and not vlan.dhcp_on:
if dhcp_on:
# check if design requires a dhcp relay and if the MaaS vlan already uses a dhcp_relay
msg = "DHCP enabled for subnet %s, activating in MaaS" % (
subnet.name)
@@ -702,12 +713,25 @@ class CreateNetworkTemplate(BaseMaasAction):
self.maas_client)
rack_ctlrs.refresh()

# Reset DHCP stuff to avoid offline rack controllers

vlan.reset_dhcp_mgmt()
dhcp_config_set = False

for r in rack_ctlrs:
if n.dhcp_relay_upstream_target is not None:
if r.interface_for_ip(
n.dhcp_relay_upstream_target):
if not r.is_healthy():
msg = ("Rack controller %s with DHCP relay is not healthy." %
r.hostname)
self.logger.info(msg)
self.task.add_status_msg(
msg=msg,
error=True,
ctx=n.name,
ctx_type='network')
break
iface = r.interface_for_ip(
n.dhcp_relay_upstream_target)
vlan.relay_vlan = iface.vlan
@@ -730,21 +754,42 @@ class CreateNetworkTemplate(BaseMaasAction):
self.logger.debug(msg)
rackctl_id = r.resource_id

vlan.dhcp_on = True
vlan.primary_rack = rackctl_id
msg = "Enabling DHCP on VLAN %s managed by rack ctlr %s" % (
vlan.resource_id, rackctl_id)
self.logger.debug(msg)
self.task.add_status_msg(
msg=msg,
error=False,
ctx=n.name,
ctx_type='network')
vlan.update()
dhcp_config_set = True
if not r.is_healthy():
msg = ("Rack controller %s not healthy, skipping DHCP config." %
r.resource_id)
self.logger.info(msg)
self.task.add_status_msg(
msg=msg,
error=True,
ctx=n.name,
ctx_type='network')
break
try:
vlan.dhcp_on = True
vlan.add_rack_controller(
rackctl_id)
msg = "Enabling DHCP on VLAN %s managed by rack ctlr %s" % (
vlan.resource_id, rackctl_id)
self.logger.debug(msg)
self.task.add_status_msg(
msg=msg,
error=False,
ctx=n.name,
ctx_type='network')
vlan.update()
dhcp_config_set = True
except RackControllerConflict as rack_ex:
msg = (
"More than two rack controllers on vlan %s, "
"skipping enabling %s." %
(vlan.resource_id, rackctl_id))
self.logger.debug(msg)
self.task.add_status_msg(
msg=msg,
error=False,
ctx=n.name,
ctx_type='network')
break
if dhcp_config_set:
break

if not dhcp_config_set:
msg = "Network %s requires DHCP, but could not locate a rack controller to serve it." % (
@@ -757,9 +802,6 @@ class CreateNetworkTemplate(BaseMaasAction):
ctx_type='network')
self.task.failure(focus=n.name)

elif dhcp_on and vlan.dhcp_on:
self.logger.info("DHCP already enabled for subnet %s" %
(subnet.resource_id))
except ValueError:
raise errors.DriverError("Inconsistent data from MaaS")

@@ -1026,21 +1068,6 @@ class IdentifyNode(BaseMaasAction):
"""Action to identify a node resource in MaaS matching a node design."""

def start(self):
try:
machine_list = maas_machine.Machines(self.maas_client)
machine_list.refresh()
except Exception as ex:
self.logger.debug("Error accessing the MaaS API.", exc_info=ex)
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.failure()
self.task.add_status_msg(
msg='Error accessing MaaS Machines API: %s' % str(ex),
error=True,
ctx='NA',
ctx_type='NA')
self.task.save()
return

self.task.set_status(hd_fields.TaskStatus.Running)
self.task.save()

@@ -1062,37 +1089,56 @@ class IdentifyNode(BaseMaasAction):

for n in nodes:
try:
machine = machine_list.identify_baremetal_node(
n, domain=n.get_domain(site_design))
if machine is not None:
self.task.success(focus=n.get_id())
self.task.add_status_msg(
msg="Node %s identified in MaaS" % n.name,
error=False,
ctx=n.name,
ctx_type='node')
else:
machine = find_node_in_maas(self.maas_client, n)
if machine is None:
self.task.failure(focus=n.get_id())
self.task.add_status_msg(
msg="Node %s not found in MaaS" % n.name,
error=True,
ctx=n.name,
ctx_type='node')
elif type(machine) == maas_machine.Machine:
machine.update_identity(n, domain=n.get_domain(site_design))
msg = "Node %s identified in MaaS" % n.name
self.logger.debug(msg)
self.task.add_status_msg(
msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
self.task.success(focus=n.get_id())
elif type(machine) == maas_rack.RackController:
msg = "Rack controller %s identified in MaaS" % n.name
self.logger.debug(msg)
self.task.add_status_msg(
msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
self.task.success(focus=n.get_id())
except ApiNotAvailable as api_ex:
self.logger.debug("Error accessing the MaaS API.", exc_info=api_ex)
self.task.failure()
self.task.add_status_msg(
msg='Error accessing MaaS API: %s' % str(api_ex),
error=True,
ctx='NA',
ctx_type='NA')
self.task.save()
except Exception as ex:
self.logger.debug(
"Exception caught in identify node.", exc_info=ex)
self.task.failure(focus=n.get_id())
self.task.add_status_msg(
msg="Node %s not found in MaaS" % n.name,
msg="Error trying to location %s in MAAS" % n.name,
|
||||
error=True,
ctx=n.name,
ctx_type='node')
self.logger.debug(
"Exception caught in identify node.", exc_info=ex)

self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save()
return


class ConfigureHardware(BaseMaasAction):
"""Action to start commissioning a server."""

@@ -1136,9 +1182,15 @@ class ConfigureHardware(BaseMaasAction):
try:
self.logger.debug(
"Locating node %s for commissioning" % (n.name))
machine = machine_list.identify_baremetal_node(
n, update_name=False)
if machine is not None:
machine = find_node_in_maas(self.maas_client, n)
if type(machine) == maas_rack.RackController:
msg = "Located node %s in MaaS as rack controller. Skipping." % (
n.name)
self.logger.info(msg)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.success(focus=n.get_id())
elif machine is not None:
if machine.status_name in [
'New', 'Broken', 'Failed commissioning',
'Failed testing'
@@ -1215,7 +1267,7 @@ class ConfigureHardware(BaseMaasAction):
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.success(focus=n.get_id())
else:
msg = "Located node %s in MaaS, unknown status %s. Skipping..." % (
msg = "Located node %s in MaaS, unknown status %s. Skipping." % (
n, machine.status_name)
self.logger.warning(msg)
self.task.add_status_msg(
@@ -1323,10 +1375,20 @@ class ApplyNodeNetworking(BaseMaasAction):
self.logger.debug(
"Locating node %s for network configuration" % (n.name))

machine = machine_list.identify_baremetal_node(
n, update_name=False)
machine = find_node_in_maas(self.maas_client, n)

if machine is not None:
if type(machine) is maas_rack.RackController:
msg = ("Node %s is a rack controller, skipping deploy action." %
n.name)
self.logger.debug(msg)
self.task.add_status_msg(
msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
self.task.success(focus=n.name)
continue
elif machine is not None:
if machine.status_name.startswith('Failed Dep'):
msg = (
"Node %s has failed deployment, releasing to try again."
@@ -1677,8 +1739,7 @@ class ApplyNodePlatform(BaseMaasAction):
self.logger.debug(
"Locating node %s for platform configuration" % (n.name))

machine = machine_list.identify_baremetal_node(
n, update_name=False)
machine = find_node_in_maas(self.maas_client, n)

if machine is None:
msg = "Could not locate machine for node %s" % n.name
@@ -1695,7 +1756,14 @@ class ApplyNodePlatform(BaseMaasAction):
msg=msg, error=True, ctx=n.name, ctx_type='node')
continue

if machine.status_name == 'Deployed':
if type(machine) is maas_rack.RackController:
msg = ("Skipping changes to rack controller %s." % n.name)
self.logger.info(msg)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.success(focus=n.name)
continue
elif machine.status_name == 'Deployed':
msg = (
"Located node %s in MaaS, status deployed. Skipping "
"and considering success. Destroy node first if redeploy needed."
@@ -1856,8 +1924,7 @@ class ApplyNodeStorage(BaseMaasAction):
self.logger.debug(
"Locating node %s for storage configuration" % (n.name))

machine = machine_list.identify_baremetal_node(
n, update_name=False)
machine = find_node_in_maas(self.maas_client, n)

if machine is None:
msg = "Could not locate machine for node %s" % n.name
@@ -1874,7 +1941,15 @@ class ApplyNodeStorage(BaseMaasAction):
self.task.failure(focus=n.get_id())
continue

if machine.status_name == 'Deployed':
if type(machine) is maas_rack.RackController:
msg = ("Skipping configuration updates to rack controller %s." %
n.name)
self.logger.info(msg)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.success(focus=n.name)
continue
elif machine.status_name == 'Deployed':
msg = (
"Located node %s in MaaS, status deployed. Skipping "
"and considering success. Destroy node first if redeploy needed."
@@ -2202,9 +2277,16 @@ class DeployNode(BaseMaasAction):

for n in nodes:
try:
machine = machine_list.identify_baremetal_node(
n, update_name=False)
if machine.status_name.startswith(
machine = find_node_in_maas(self.maas_client, n)

if type(machine) is maas_rack.RackController:
msg = "Skipping configuration of rack controller %s." % n.name
self.logger.info(msg)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.success(focus=n.name)
continue
elif machine.status_name.startswith(
'Deployed') or machine.status_name.startswith(
'Deploying'):
msg = "Node %s already deployed or deploying, skipping." % (
@@ -2358,3 +2440,26 @@ class DeployNode(BaseMaasAction):
self.task.save()

return

def find_node_in_maas(maas_client, node_model):
"""Find a node in MAAS matching the node_model.

Note that the returned Machine may be a simple Machine or
a RackController.

:param maas_client: instance of an active session to MAAS
:param node_model: instance of objects.Node to match
:returns: instance of maasdriver.models.Machine
"""

machine_list = maas_machine.Machines(maas_client)
machine_list.refresh()
machine = machine_list.identify_baremetal_node(node_model)

if not machine:
# If the node isn't found as a normal node, check rack controllers
rackd_list = maas_rack.RackControllers(maas_client)
rackd_list.refresh()
machine = rackd_list.identify_baremetal_node(node_model)

return machine
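Every action in this file now funnels its node lookup through this helper and branches on what it returns: None (not registered in MAAS), a plain Machine (safe to act on), or a RackController (left alone). A hedged sketch of a caller following that pattern; find_node_in_maas and the rack controller model come from this commit, while the wrapper function itself is illustrative only:

    import drydock_provisioner.drivers.node.maasdriver.models.rack_controller as maas_rack

    def locate_actionable_machine(maas_client, node_model):
        """Illustrative wrapper: return a Machine to act on, or None to skip the node."""
        # find_node_in_maas is the module-level helper defined above.
        machine = find_node_in_maas(maas_client, node_model)
        if machine is None:
            return None  # node is not registered in MAAS at all
        if isinstance(machine, maas_rack.RackController):
            return None  # rack controllers are never commissioned, deployed or destroyed here
        return machine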
@@ -128,8 +128,9 @@ class MaasRequestFactory(object):

for (k, v) in files.items():
if v is None:
continue
elif isinstance(v, list):
v = ""

if isinstance(v, list):
for i in v:
value = base64.b64encode(
str(i).encode('utf-8')).decode('utf-8')
python/drydock_provisioner/drivers/node/maasdriver/errors.py (new file, 26 lines)
@@ -0,0 +1,26 @@
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors and exceptions specific to MAAS node driver."""
import drydock_provisioner.error as errors


class RackControllerConflict(errors.DriverError):
"""Exception for settings that are not allowed because not enough
or too many rack controllers are attached to a network."""
pass


class ApiNotAvailable(errors.DriverError):
"""Exception when trying to utilize the MAAS API and the connection
fails."""
pass
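Both new exception types subclass the driver's existing DriverError, so callers that already catch DriverError keep working and the more specific types simply allow targeted handling. A tiny illustrative sketch (the raising function is invented for the example):

    import drydock_provisioner.error as errors
    from drydock_provisioner.drivers.node.maasdriver.errors import RackControllerConflict

    def assign_third_rack_controller():
        """Invented helper that always conflicts, purely for illustration."""
        raise RackControllerConflict(
            "Both primary and secondary rack controllers already set.")

    try:
        assign_third_rack_controller()
    except RackControllerConflict:
        pass  # handle the specific conflict, e.g. stop adding rack controllers
    except errors.DriverError:
        pass  # or fall back to the generic driver error handling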
@@ -37,6 +37,8 @@ class ResourceBase(object):
for f in self.fields:
if f in kwargs.keys():
setattr(self, f, kwargs.get(f))
else:
setattr(self, f, None)

"""
Update resource attributes from MaaS
@@ -235,6 +235,18 @@ class Interface(model_base.ResourceBase):

return False

def responds_to_mac(self, mac_address):
"""Check if this interface will respond to a MAC address.

:param str mac_address: the MAC address to check

:return: true if this interface will respond to this MAC
"""
if mac_address.replace(':', '').upper() == self.mac_address.replace(':', '').upper():
return True

return False

def set_mtu(self, new_mtu):
"""Set interface MTU.

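The comparison in responds_to_mac strips the ':' separators and upper-cases both sides, so interface lookups by MAC are insensitive to separator and letter case; that is what lets the machine-model lookups added below match a design's boot_mac however it was written. A standalone sketch of the same normalization, with made-up MAC strings:

    def macs_match(a, b):
        """Compare two MAC addresses, ignoring ':' separators and case."""
        return a.replace(':', '').upper() == b.replace(':', '').upper()

    # Illustrative values only.
    assert macs_match("52:54:00:ab:cd:ef", "525400ABCDEF")
    assert not macs_match("52:54:00:ab:cd:ef", "52:54:00:ab:cd:f0")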
@@ -77,6 +77,18 @@ class Machine(model_base.ResourceBase):
return i
return None

def interface_for_mac(self, mac_address):
"""Find the machine interface that owns the specified ``mac_address``.

:param str mac_address: The MAC address

:return: the interface that responds to this MAC or None
"""
for i in self.interfaces:
if i.responds_to_mac(mac_address):
return i
return None

def get_power_params(self):
"""Load power parameters for this node from MaaS."""
url = self.interpolate_url()
@@ -426,6 +438,30 @@ class Machine(model_base.ResourceBase):
"Failed updating power parameters MAAS url {} - return code {}\n{}"
.format(url, resp.status_code.resp.text))

def update_identity(self, n, domain="local"):
"""Update this node's identity based on the Node object ``n``

:param objects.Node n: The Node object to use as reference
:param str domain: The DNS domain to register this node under
"""
try:
self.hostname = n.name
self.domain = domain
self.update()
if n.oob_type == 'libvirt':
self.logger.debug(
"Updating node %s MaaS power parameters for libvirt." %
(n.name))
oob_params = n.oob_parameters
self.set_power_parameters(
'virsh',
power_address=oob_params.get('libvirt_uri'),
power_id=n.name)
self.logger.debug("Updated MaaS resource %s hostname to %s" %
(self.resource_id, n.name))
except Exception as ex:
self.logger.debug("Error updating MAAS node: %s" % str(ex))

def to_dict(self):
"""Serialize this resource instance into a dict.

@@ -522,9 +558,7 @@ class Machines(model_base.ResourceCollectionBase):
return node

def identify_baremetal_node(self,
node_model,
update_name=True,
domain="local"):
node_model):
"""Find MaaS node resource matching Drydock BaremetalNode.

Search all the defined MaaS Machines and attempt to match
@@ -532,7 +566,6 @@ class Machines(model_base.ResourceCollectionBase):
the MaaS instance with the correct hostname

:param node_model: Instance of objects.node.BaremetalNode to search MaaS for matching resource
:param update_name: Whether Drydock should update the MaaS resource name to match the Drydock design
"""
maas_node = None

@@ -552,46 +585,37 @@ class Machines(model_base.ResourceCollectionBase):
node_oob_ip
})
except ValueError:
self.logger.warn(
self.logger.info(
"Error locating matching MaaS resource for OOB IP %s" %
(node_oob_ip))
return None
else:
# Use boot_mac for node's not using IPMI
node_boot_mac = node_model.boot_mac
nodes = self.find_nodes_with_mac(node_model.boot_mac)

if node_boot_mac is not None:
maas_node = self.singleton({'boot_mac': node_model.boot_mac})
if len(nodes) == 1:
maas_node = nodes[0]
else:
self.logger.debug("Error: Found %d nodes with MAC %s", len(nodes), node_model.boot_mac)
maas_node = None

if maas_node is None:
self.logger.info(
"Could not locate node %s in MaaS" % node_model.name)
return None

self.logger.debug("Found MaaS resource %s matching Node %s" %
(maas_node.resource_id, node_model.get_id()))

if maas_node.hostname != node_model.name and update_name:
try:
maas_node.hostname = node_model.name
maas_node.domain = domain
maas_node.update()
if node_model.oob_type == 'libvirt':
self.logger.debug(
"Updating node %s MaaS power parameters for libvirt." %
(node_model.name))
oob_params = node_model.oob_parameters
maas_node.set_power_parameters(
'virsh',
power_address=oob_params.get('libvirt_uri'),
power_id=node_model.name)
self.logger.debug("Updated MaaS resource %s hostname to %s" %
(maas_node.resource_id, node_model.name))
except Exception as ex:
self.logger.debug("Error updating MAAS node: %s" % str(ex))
else:
self.logger.debug("Found MaaS resource %s matching Node %s" %
(maas_node.resource_id, node_model.get_id()))

return maas_node

def find_nodes_with_mac(self, mac_address):
"""Find a list of nodes that own a NIC with ``mac_address``"""
node_list = []
for n in self.resources.values():
if n.interface_for_mac(mac_address):
node_list.append(n)
return node_list

def query(self, query):
"""Custom query method to deal with complex fields."""
result = list(self.resources.values())
@@ -13,7 +13,7 @@
# limitations under the License.
"""Model for MaaS rack-controller API resource."""

import drydock_provisioner.drivers.node.maasdriver.models.base as model_base
import drydock_provisioner.error as errors
import drydock_provisioner.drivers.node.maasdriver.models.machine as maas_machine


@ -64,8 +64,25 @@ class RackController(maas_machine.Machine):

return svc_status

def update_identity(self, n, domain="local"):
"""Cannot update rack controller identity."""
self.logger.debug("Cannot update rack controller identity for %s, no-op." %
self.hostname)
return

class RackControllers(model_base.ResourceCollectionBase):
def is_healthy(self):
"""Check if this rack controller appears healthy based on service status."""
rack_svc = self.get_services()
healthy = True
for s in rack_svc:
if s in RackController.REQUIRED_SERVICES:
# TODO(sh8121att) for dhcpd, ensure it is running if this rack controller
# is a primary or secondary for a VLAN
if rack_svc[s] not in ("running", "off"):
healthy = False
return healthy

class RackControllers(maas_machine.Machines):
"""Model for a collection of rack controllers."""

collection_url = 'rackcontrollers/'
@@ -73,3 +90,7 @@ class RackControllers(model_base.ResourceCollectionBase):

def __init__(self, api_client, **kwargs):
super().__init__(api_client)

def acquire_node(self, node_name):
"""Acquire not valid for nodes that are Rack Controllers."""
raise errors.DriverError("Rack controllers cannot be acquired.")
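is_healthy treats a required service as acceptable when it reports either running or off; dhcpd, for instance, is legitimately off on a rack controller that serves no VLAN, and the TODO above notes a stricter future check. A standalone sketch of that criterion with an invented service map; the authoritative list is RackController.REQUIRED_SERVICES in the model above:

    # Assumed service names for illustration only.
    REQUIRED_SERVICES = ("rackd", "tftp", "http", "dhcpd")

    # Invented example of what get_services() might report for one rack controller.
    rack_svc = {"rackd": "running", "tftp": "running", "http": "running", "dhcpd": "off"}

    healthy = all(
        state in ("running", "off")
        for svc, state in rack_svc.items()
        if svc in REQUIRED_SERVICES)
    print(healthy)  # True: 'off' is acceptable, while states like 'dead' would not be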
@@ -14,6 +14,7 @@
"""Models representing MaaS VLAN resources."""

import drydock_provisioner.drivers.node.maasdriver.models.base as model_base
from drydock_provisioner.drivers.node.maasdriver.errors import RackControllerConflict


class Vlan(model_base.ResourceBase):
@@ -65,6 +66,41 @@ class Vlan(model_base.ResourceBase):
else:
self.vid = int(new_vid)

def add_rack_controller(self, rack_id):
"""Add a rack controller that manages DHCP on this VLAN.

Whichever of primary_rack or secondary_rack, in that order,
is not set - set to ``rack_id``. If both are already set
raise RackControllerConflict exception.
"""
if not self.primary_rack or self.primary_rack == rack_id:
self.logger.debug("Setting primary DHCP controller %s on VLAN %s", rack_id, self.resource_id)
self.primary_rack = rack_id
elif not self.secondary_rack or self.secondary_rack == rack_id:
self.logger.debug("Setting secondary DHCP controller %s on VLAN %s.", rack_id, self.resource_id)
self.secondary_rack = rack_id
else:
raise RackControllerConflict(
"Both primary and secondary rack controllers already set.")

def reset_dhcp_mgmt(self, commit=False):
"""Reset the DHCP control for this VLAN.

Reset the settings in the model impacting DHCP control on this
VLAN. Only commit these changes to the MAAS API if ``commit`` is
True.

:param bool commit: Whether to commit reset to MAAS API
"""
self.logger.debug("Resetting DHCP control on VLAN %s.", self.resource_id)
self.relay_vlan = None
self.dhcp_on = False
self.primary_rack = None
self.secondary_rack = None

if commit:
self.update()

def set_dhcp_relay(self, relay_vlan_id):
self.relay_vlan = relay_vlan_id
self.update()
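add_rack_controller fills primary_rack first, then secondary_rack, and refuses a third controller, which is what lets CreateNetworkTemplate spread DHCP across at most two healthy rack controllers. A hedged sketch of the calling pattern outside the action code; the vlan object and the rack controller resource IDs are assumed to already exist:

    from drydock_provisioner.drivers.node.maasdriver.errors import RackControllerConflict

    for rackctl_id in ("rack-a", "rack-b", "rack-c"):  # illustrative resource IDs
        try:
            vlan.add_rack_controller(rackctl_id)  # primary first, then secondary
        except RackControllerConflict:
            break  # both slots taken; at most two DHCP rack controllers per VLAN
    vlan.dhcp_on = True
    vlan.update()  # commit primary_rack, secondary_rack and dhcp_on to the MAAS API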
@@ -53,12 +53,10 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
site_design,
state_manager,
resolve_aliases=False):
self.logger.debug("Applying host profile to node %s" % self.name)
self.logger.debug("Compiling effective node model for %s" % self.name)
self.apply_host_profile(site_design)
self.logger.debug("Applying hardware profile to node %s" % self.name)
self.apply_hardware_profile(site_design)
self.source = hd_fields.ModelSource.Compiled
self.logger.debug("Resolving kernel parameters on node %s" % self.name)
self.resolve_kernel_params(site_design)
if resolve_aliases:
self.logger.debug(
python/tests/unit/test_maasdriver_vlan.py (new file, 49 lines)
@@ -0,0 +1,49 @@
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for the maasdriver Vlan model rack controller assignment.'''
import pytest

from drydock_provisioner.drivers.node.maasdriver.models.vlan import Vlan
from drydock_provisioner.drivers.node.maasdriver.errors import RackControllerConflict


class TestMaasVlan():
def test_add_rack_controller(self, mocker):
'''Test vlan model method for setting a managing rack controller.'''

# An object to return that looks like a requests response
# object wrapping a MAAS API response
class MockedResponse():

status_code = 200

vlan_fields = {'name': 'test', 'dhcp_on': True, 'mtu': 1500}

primary_rack = "asdf79"
secondary_rack = "asdf80"
tertiary_rack = "asdf81"

api_client = mocker.MagicMock()
api_client.get.return_value = MockedResponse()

vlan_obj = Vlan(api_client, **vlan_fields)

vlan_obj.add_rack_controller(primary_rack)
assert vlan_obj.primary_rack == primary_rack

vlan_obj.add_rack_controller(secondary_rack)
assert vlan_obj.secondary_rack == secondary_rack

with pytest.raises(RackControllerConflict):
vlan_obj.add_rack_controller(tertiary_rack)