Add deckhand API integration
- Detect schema 'deckhand+http' and trigger Deckhand logic
- Add Keystone session integration
- Generalize ReferenceResolver
- Add unit tests for data reference resolution
- Add Ingester plugin for Deckhand-based documents
- Add unit tests for deckhand parser
- Add unit test for validation
- Fix BootactionReport action

Change-Id: Iacaed208ea0813ecba52155ee0e4c25c87920a99
Parent: 6d29229b09
Commit: f368aa9fc0
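The thread running through this change: every design or asset reference is resolved by URI scheme. Plain http(s) and file URLs are fetched directly, while deckhand+http and promenade+http URLs are fetched through a Keystone-authenticated session with the scheme prefix stripped. A minimal sketch of the new entry point, with a purely illustrative Deckhand endpoint URL:

    from drydock_provisioner.statemgmt.design.resolver import ReferenceResolver

    # Hypothetical Deckhand rendered-documents endpoint; the 'deckhand+http'
    # scheme routes to the Keystone-session handler added in this change.
    design_ref = ('deckhand+http://deckhand-api:9000'
                  '/api/v1.0/revisions/1/rendered-documents')

    # Returns the raw document bytes, or raises InvalidDesignReference for an
    # unknown scheme or an error response from the endpoint.
    design_blob = ReferenceResolver.resolve_reference(design_ref)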
@@ -64,7 +64,7 @@ class DrydockConfig(object):
            'log_level', default='INFO', help='Global log level for Drydock'),
        cfg.StrOpt(
            'global_logger_name',
            default='drydock',
            default='drydock_provisioner',
            help='Logger name for the top-level logger'),
        cfg.StrOpt(
            'oobdriver_logger_name',
@@ -96,5 +96,6 @@ class VersionsResource(BaseResource):
            'v1.0': {
                'path': '/api/v1.0',
                'status': 'stable'
            }})
            }
        })
        resp.status = falcon.HTTP_200
@@ -122,8 +122,7 @@ class BootactionResource(StatefulResource):
        resp.status = falcon.HTTP_200
        resp.content_type = 'application/json'
        ba_entry['task_id'] = str(ba_entry['task_id'])
        ba_entry['action_id'] = ulid2.encode_ulid_base32(
            ba_entry['action_id'])
        ba_entry['action_id'] = ulid2.encode_ulid_base32(ba_entry['action_id'])
        resp.body = json.dumps(ba_entry)
        return
@@ -85,10 +85,11 @@ class Ingester(object):
            raise ValueError(
                "Ingester:ingest_data required kwarg 'design_ref' missing")

        design_blob = design_state.get_design_documents(design_ref)
        self.logger.debug(
            "Ingester:ingest_data ingesting design parts for design %s" %
            design_ref)
        design_blob = design_state.get_design_documents(design_ref)
        self.logger.debug("Ingesting design data of %d bytes." % len(design_blob))

        try:
            status, design_items = self.registered_plugin.ingest_data(
drydock_provisioner/ingester/plugins/deckhand.py (new file, 649 lines)
@@ -0,0 +1,649 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""This data ingester will consume YAML site topology documents."""
|
||||
|
||||
import yaml
|
||||
import logging
|
||||
import jsonschema
|
||||
import os
|
||||
import pkg_resources
|
||||
import copy
|
||||
|
||||
import drydock_provisioner.objects.fields as hd_fields
|
||||
|
||||
from drydock_provisioner import error as errors
|
||||
from drydock_provisioner import objects
|
||||
from drydock_provisioner.ingester.plugins import IngesterPlugin
|
||||
|
||||
|
||||
class DeckhandIngester(IngesterPlugin):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.logger = logging.getLogger('drydock.ingester.deckhand')
|
||||
|
||||
self.load_schemas()
|
||||
|
||||
def get_name(self):
|
||||
return "deckhand"
|
||||
|
||||
def ingest_data(self, **kwargs):
|
||||
"""Parse and save design data.
|
||||
|
||||
:param content: String of valid Deckhand YAML
|
||||
|
||||
:returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects
|
||||
"""
|
||||
if 'content' in kwargs:
|
||||
parse_status, models = self.parse_docs(kwargs.get('content'))
|
||||
else:
|
||||
raise ValueError('Missing parameter "content"')
|
||||
|
||||
return parse_status, models
|
||||
|
||||
def parse_docs(self, doc_blob):
|
||||
"""Translate a YAML string into the internal Drydock model.
|
||||
|
||||
Returns a tuple of a objects.TaskStatus instance to summarize all
|
||||
document processing and a list of models yielded by successful processing
|
||||
|
||||
:param doc_blob: bytes representing a utf-8 encoded YAML string
|
||||
"""
|
||||
models = []
|
||||
yaml_string = doc_blob.decode()
|
||||
self.logger.debug("deckhand_ingester:parse_docs - Parsing YAML string.")
|
||||
try:
|
||||
parsed_data = yaml.safe_load_all(yaml_string)
|
||||
except yaml.YAMLError as err:
|
||||
if hasattr(err, 'problem_mark'):
|
||||
mark = err.problem_mark
|
||||
raise errors.IngesterError(
|
||||
"Error parsing YAML at (l:%s, c:%s): %s" %
|
||||
(mark.line + 1, mark.column + 1, err))
|
||||
else:
|
||||
raise errors.IngesterError("Error parsing YAML: %s" % (err))
|
||||
|
||||
# tracking processing status to provide a complete summary of issues
|
||||
ps = objects.TaskStatus()
|
||||
ps.set_status(hd_fields.ActionResult.Success)
|
||||
for d in parsed_data:
|
||||
try:
|
||||
(schema_ns, doc_kind, doc_version) = d.get('schema', '').split('/')
|
||||
except ValueError as ex:
|
||||
self.logger.error("Error with document structure.", exc_info=ex)
|
||||
self.logger.debug("Error document\n%s" % yaml.dump(d))
|
||||
continue
|
||||
if schema_ns == 'drydock':
|
||||
try:
|
||||
doc_errors = self.validate_drydock_document(d)
|
||||
if len(doc_errors) > 0:
|
||||
doc_ctx = d.get('metadata', {}).get('name', 'Unknown')
|
||||
for e in doc_errors:
|
||||
ps.add_status_msg(
|
||||
msg="%s:%s validation error: %s" %
|
||||
(doc_kind, doc_version, e),
|
||||
error=True,
|
||||
ctx_type='document',
|
||||
ctx=doc_ctx)
|
||||
ps.set_status(hd_fields.ActionResult.Failure)
|
||||
continue
|
||||
model = self.process_drydock_document(d)
|
||||
ps.add_status_msg(
|
||||
msg="Successfully processed Drydock document type %s."
|
||||
% doc_kind,
|
||||
error=False,
|
||||
ctx_type='document',
|
||||
ctx=model.get_id())
|
||||
models.append(model)
|
||||
except errors.IngesterError as ie:
|
||||
msg = "Error processing document: %s" % str(ie)
|
||||
self.logger.warning(msg)
|
||||
if d.get('metadata', {}).get('name', None) is not None:
|
||||
ctx = d.get('metadata').get('name')
|
||||
else:
|
||||
ctx = 'Unknown'
|
||||
ps.add_status_msg(
|
||||
msg=msg, error=True, ctx_type='document', ctx=ctx)
|
||||
ps.set_status(hd_fields.ActionResult.Failure)
|
||||
except Exception as ex:
|
||||
msg = "Unexpected error processing document: %s" % str(ex)
|
||||
self.logger.error(msg, exc_info=True)
|
||||
if d.get('metadata', {}).get('name', None) is not None:
|
||||
ctx = d.get('metadata').get('name')
|
||||
else:
|
||||
ctx = 'Unknown'
|
||||
ps.add_status_msg(
|
||||
msg=msg, error=True, ctx_type='document', ctx=ctx)
|
||||
ps.set_status(hd_fields.ActionResult.Failure)
|
||||
return (ps, models)
|
||||
|
||||
def process_drydock_document(self, doc):
|
||||
"""Process a parsed YAML document.
|
||||
|
||||
:param doc: The dictionary from parsing the YAML document
|
||||
"""
|
||||
(schema_ns, kind, version) = doc.get('schema', '').split('/')
|
||||
if version == 'v1':
|
||||
doc_processor = DeckhandIngester.v1_doc_handlers.get(kind, None)
|
||||
else:
|
||||
doc_processor = None
|
||||
|
||||
if doc_processor is None:
|
||||
raise errors.IngesterError(
|
||||
"Invalid document - Kind %s and Version %s" % (kind, version))
|
||||
metadata = doc.get('metadata', {})
|
||||
doc_name = metadata.get('name')
|
||||
return doc_processor(self, doc_name, doc.get('data', {}))
|
||||
|
||||
def validate_drydock_document(self, doc):
|
||||
"""Validate a parsed document via jsonschema.
|
||||
|
||||
If a schema for a document Kind is not available, the document is
|
||||
considered valid. Schema is chosen by the doc['kind'] field.
|
||||
|
||||
Returns an empty list for valid documents, otherwise returns a list
|
||||
of all found errors
|
||||
|
||||
:param doc: dictionary of the parsed document.
|
||||
"""
|
||||
schemaname = doc.get('schema', '')
|
||||
(schema_ns, doc_kind, doc_version) = schemaname.split('/')
|
||||
|
||||
errors_found = []
|
||||
|
||||
if doc_version == 'v1':
|
||||
if schemaname in self.v1_doc_schemas:
|
||||
validator = jsonschema.Draft4Validator(
|
||||
self.v1_doc_schemas.get(schemaname))
|
||||
for error in validator.iter_errors(doc.get('data', [])):
|
||||
errors_found.append(error.message)
|
||||
|
||||
return errors_found
|
||||
|
||||
def process_drydock_region(self, name, data):
|
||||
"""Process the data/spec section of a Region document.
|
||||
|
||||
:param name: the document name attribute
|
||||
:param data: the dictionary of the data/spec section
|
||||
"""
|
||||
model = objects.Site()
|
||||
|
||||
# Need to add validation logic, we'll assume the input is
|
||||
# valid for now
|
||||
model.name = name
|
||||
model.status = hd_fields.SiteStatus.Unknown
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
|
||||
model.tag_definitions = objects.NodeTagDefinitionList()
|
||||
|
||||
tag_defs = data.get('tag_definitions', [])
|
||||
|
||||
for t in tag_defs:
|
||||
tag_model = objects.NodeTagDefinition()
|
||||
tag_model.tag = t.get('tag', '')
|
||||
tag_model.type = t.get('definition_type', '')
|
||||
tag_model.definition = t.get('definition', '')
|
||||
|
||||
if tag_model.type not in ['lshw_xpath']:
|
||||
raise errors.IngesterError('Unknown definition_type in '
|
||||
'tag_definition instance: %s' %
|
||||
(t.definition_type))
|
||||
model.tag_definitions.append(tag_model)
|
||||
|
||||
auth_keys = data.get('authorized_keys', [])
|
||||
|
||||
model.authorized_keys = [k for k in auth_keys]
|
||||
|
||||
return model
|
||||
|
||||
def process_drydock_rack(self, name, data):
|
||||
"""Process the data/spec section of a Rack document.
|
||||
|
||||
:param name: the document name attribute
|
||||
:param data: the dictionary of the data/spec section
|
||||
"""
|
||||
model = objects.Rack()
|
||||
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
model.name = name
|
||||
|
||||
model.tor_switches = objects.TorSwitchList()
|
||||
tors = data.get('tor_switches', {})
|
||||
|
||||
for k, v in tors.items():
|
||||
tor = objects.TorSwitch()
|
||||
tor.switch_name = k
|
||||
tor.mgmt_ip = v.get('mgmt_ip', None)
|
||||
tor.sdn_api_uri = v.get('sdn_api_url', None)
|
||||
model.tor_switches.append(tor)
|
||||
|
||||
model.location = copy.deepcopy(data.get('location', {}))
|
||||
|
||||
model.local_networks = [n for n in data.get('local_networks', [])]
|
||||
|
||||
return model
|
||||
|
||||
def process_drydock_networklink(self, name, data):
|
||||
"""Process the data/spec section of a NetworkLink document.
|
||||
|
||||
:param name: the document name attribute
|
||||
:param data: the dictionary of the data/spec section
|
||||
"""
|
||||
model = objects.NetworkLink()
|
||||
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
model.name = name
|
||||
|
||||
model.metalabels = data.get('labels', {})
|
||||
|
||||
bonding = data.get('bonding', {})
|
||||
|
||||
model.bonding_mode = bonding.get(
|
||||
'mode', hd_fields.NetworkLinkBondingMode.Disabled)
|
||||
|
||||
if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP:
|
||||
model.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
|
||||
model.bonding_peer_rate = bonding.get('peer_rate', 'fast')
|
||||
model.bonding_mon_rate = bonding.get('mon_rate', '100')
|
||||
model.bonding_up_delay = bonding.get('up_delay', '200')
|
||||
model.bonding_down_delay = bonding.get('down_delay', '200')
|
||||
|
||||
model.mtu = data.get('mtu', None)
|
||||
model.linkspeed = data.get('linkspeed', None)
|
||||
|
||||
trunking = data.get('trunking', {})
|
||||
model.trunk_mode = trunking.get(
|
||||
'mode', hd_fields.NetworkLinkTrunkingMode.Disabled)
|
||||
model.native_network = trunking.get('default_network', None)
|
||||
|
||||
model.allowed_networks = data.get('allowed_networks', None)
|
||||
|
||||
return model
|
||||
|
||||
def process_drydock_network(self, name, data):
|
||||
"""Process the data/spec section of a Network document.
|
||||
|
||||
:param name: the document name attribute
|
||||
:param data: the dictionary of the data/spec section
|
||||
"""
|
||||
model = objects.Network()
|
||||
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
model.name = name
|
||||
|
||||
model.metalabels = data.get('labels', {})
|
||||
|
||||
model.cidr = data.get('cidr', None)
|
||||
model.vlan_id = data.get('vlan', None)
|
||||
model.mtu = data.get('mtu', None)
|
||||
|
||||
dns = data.get('dns', {})
|
||||
model.dns_domain = dns.get('domain', 'local')
|
||||
model.dns_servers = dns.get('servers', None)
|
||||
|
||||
ranges = data.get('ranges', [])
|
||||
model.ranges = []
|
||||
|
||||
for r in ranges:
|
||||
model.ranges.append({
|
||||
'type': r.get('type', None),
|
||||
'start': r.get('start', None),
|
||||
'end': r.get('end', None),
|
||||
})
|
||||
|
||||
routes = data.get('routes', [])
|
||||
model.routes = []
|
||||
|
||||
for r in routes:
|
||||
model.routes.append({
|
||||
'subnet': r.get('subnet', None),
|
||||
'gateway': r.get('gateway', None),
|
||||
'metric': r.get('metric', None),
|
||||
})
|
||||
|
||||
dhcp_relay = data.get('dhcp_relay', None)
|
||||
|
||||
if dhcp_relay is not None:
|
||||
model.dhcp_relay_self_ip = dhcp_relay.get('self_ip', None)
|
||||
model.dhcp_relay_upstream_target = dhcp_relay.get(
|
||||
'upstream_target', None)
|
||||
|
||||
return model
|
||||
|
||||
def process_drydock_hwprofile(self, name, data):
|
||||
"""Process the data/spec section of a HardwareProfile document.
|
||||
|
||||
:param name: the document name attribute
|
||||
:param data: the dictionary of the data/spec section
|
||||
"""
|
||||
model = objects.HardwareProfile()
|
||||
|
||||
model.name = name
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
|
||||
model.vendor = data.get('vendor', None)
|
||||
model.generation = data.get('generation', None)
|
||||
model.hw_version = data.get('hw_version', None)
|
||||
model.bios_version = data.get('bios_version', None)
|
||||
model.boot_mode = data.get('boot_mode', None)
|
||||
model.bootstrap_protocol = data.get('bootstrap_protocol', None)
|
||||
model.pxe_interface = data.get('pxe_interface', None)
|
||||
|
||||
model.devices = objects.HardwareDeviceAliasList()
|
||||
|
||||
device_aliases = data.get('device_aliases', {})
|
||||
|
||||
for d, v in device_aliases.items():
|
||||
dev_model = objects.HardwareDeviceAlias()
|
||||
dev_model.source = hd_fields.ModelSource.Designed
|
||||
dev_model.alias = d
|
||||
dev_model.bus_type = v.get('bus_type', None)
|
||||
dev_model.dev_type = v.get('dev_type', None)
|
||||
dev_model.address = v.get('address', None)
|
||||
model.devices.append(dev_model)
|
||||
|
||||
return model
|
||||
|
||||
def process_drydock_hostprofile(self, name, data):
|
||||
"""Process the data/spec section of a HostProfile document.
|
||||
|
||||
:param name: the document name attribute
|
||||
:param data: the dictionary of the data/spec section
|
||||
"""
|
||||
model = objects.HostProfile()
|
||||
model.name = name
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
|
||||
self.process_host_common_fields(data, model)
|
||||
|
||||
return model
|
||||
|
||||
def process_drydock_bootaction(self, name, data):
|
||||
"""Process the data/spec section of a BootAction document.
|
||||
|
||||
:param name: the document name attribute
|
||||
:param data: the dictionary of the parsed data/spec section
|
||||
"""
|
||||
model = objects.BootAction()
|
||||
model.name = name
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
|
||||
assets = data.get('assets')
|
||||
|
||||
model.asset_list = objects.BootActionAssetList()
|
||||
|
||||
for a in assets:
|
||||
ba = self.process_bootaction_asset(a)
|
||||
model.asset_list.append(ba)
|
||||
|
||||
node_filter = data.get('node_filter', None)
|
||||
|
||||
if node_filter is not None:
|
||||
nfs = self.process_bootaction_nodefilter(node_filter)
|
||||
model.node_filter = nfs
|
||||
|
||||
return model
|
||||
|
||||
def process_bootaction_asset(self, asset_dict):
|
||||
"""Process a dictionary representing a BootAction Data Asset.
|
||||
|
||||
:param asset_dict: dictionary representing the bootaction asset
|
||||
"""
|
||||
model = objects.BootActionAsset(**asset_dict)
|
||||
return model
|
||||
|
||||
def process_bootaction_nodefilter(self, nf):
|
||||
"""Process a dictionary representing a BootAction NodeFilter Set.
|
||||
|
||||
:param nf: dictionary representing the bootaction nodefilter set.
|
||||
"""
|
||||
model = objects.NodeFilterSet()
|
||||
model.filter_set_type = nf.get('filter_set_type', None)
|
||||
model.filter_set = []
|
||||
|
||||
for nf in nf.get('filter_set', []):
|
||||
nf_model = objects.NodeFilter(**nf)
|
||||
model.filter_set.append(nf_model)
|
||||
|
||||
return model
|
||||
|
||||
def process_drydock_node(self, name, data):
|
||||
"""Process the data/spec section of a BaremetalNode document.
|
||||
|
||||
:param name: the document name attribute
|
||||
:param data: the dictionary of the data/spec section
|
||||
"""
|
||||
model = objects.BaremetalNode()
|
||||
model.name = name
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
|
||||
self.process_host_common_fields(data, model)
|
||||
|
||||
node_metadata = data.get('metadata', {})
|
||||
model.boot_mac = node_metadata.get('boot_mac', None)
|
||||
|
||||
addresses = data.get('addressing', [])
|
||||
|
||||
if len(addresses) == 0:
|
||||
raise errors.IngesterError('BaremetalNode needs at least'
|
||||
' 1 assigned address')
|
||||
|
||||
model.addressing = objects.IpAddressAssignmentList()
|
||||
|
||||
for a in addresses:
|
||||
assignment = objects.IpAddressAssignment()
|
||||
|
||||
address = a.get('address', '')
|
||||
if address == 'dhcp':
|
||||
assignment.type = 'dhcp'
|
||||
assignment.address = None
|
||||
assignment.network = a.get('network')
|
||||
|
||||
model.addressing.append(assignment)
|
||||
elif address != '':
|
||||
assignment.type = 'static'
|
||||
assignment.address = a.get('address')
|
||||
assignment.network = a.get('network')
|
||||
|
||||
model.addressing.append(assignment)
|
||||
else:
|
||||
self.logger.error("Invalid address assignment %s on Node %s" %
                  (address, name))
|
||||
|
||||
return model
|
||||
|
||||
def process_host_common_fields(self, data, model):
|
||||
"""Process fields common to the host-based documents.
|
||||
|
||||
Update the provided model with the values of fields common
|
||||
to BaremetalNode and HostProfile documents.
|
||||
|
||||
:param data: dictionary from YAML parsing of the document data/spec section
|
||||
:param model: instance of objects.HostProfile or objects.BaremetalNode to update
|
||||
"""
|
||||
model.parent_profile = data.get('host_profile', None)
|
||||
model.hardware_profile = data.get('hardware_profile', None)
|
||||
|
||||
oob = data.get('oob', {})
|
||||
|
||||
model.oob_parameters = {}
|
||||
for k, v in oob.items():
|
||||
if k == 'type':
|
||||
model.oob_type = oob.get('type', None)
|
||||
else:
|
||||
model.oob_parameters[k] = v
|
||||
|
||||
(model.storage_devices,
|
||||
model.volume_groups) = self.process_node_storage(
|
||||
data.get('storage', {}))
|
||||
|
||||
interfaces = data.get('interfaces', {})
|
||||
model.interfaces = objects.HostInterfaceList()
|
||||
|
||||
for k, v in interfaces.items():
|
||||
int_model = objects.HostInterface()
|
||||
|
||||
# A null value indicates this interface should be removed
|
||||
# from any parent profiles
|
||||
if v is None:
|
||||
int_model.device_name = '!' + k
|
||||
continue
|
||||
|
||||
int_model.device_name = k
|
||||
int_model.network_link = v.get('device_link', None)
|
||||
|
||||
int_model.hardware_slaves = []
|
||||
slaves = v.get('slaves', [])
|
||||
|
||||
for s in slaves:
|
||||
int_model.hardware_slaves.append(s)
|
||||
|
||||
int_model.networks = []
|
||||
networks = v.get('networks', [])
|
||||
|
||||
for n in networks:
|
||||
int_model.networks.append(n)
|
||||
|
||||
model.interfaces.append(int_model)
|
||||
|
||||
platform = data.get('platform', {})
|
||||
|
||||
model.image = platform.get('image', None)
|
||||
model.kernel = platform.get('kernel', None)
|
||||
|
||||
model.kernel_params = {}
|
||||
for k, v in platform.get('kernel_params', {}).items():
|
||||
model.kernel_params[k] = v
|
||||
|
||||
model.primary_network = data.get('primary_network', None)
|
||||
|
||||
node_metadata = data.get('metadata', {})
|
||||
metadata_tags = node_metadata.get('tags', [])
|
||||
|
||||
model.tags = metadata_tags
|
||||
|
||||
owner_data = node_metadata.get('owner_data', {})
|
||||
model.owner_data = {}
|
||||
|
||||
for k, v in owner_data.items():
|
||||
model.owner_data[k] = v
|
||||
|
||||
model.rack = node_metadata.get('rack', None)
|
||||
|
||||
return model
|
||||
|
||||
def process_node_storage(self, storage):
|
||||
"""Process the storage data for a node-based document.
|
||||
|
||||
Return a tuple of two lists: the first is a StorageDeviceList, the
|
||||
second is a VolumeGroupList.
|
||||
|
||||
:param storage: dictionary of the storage section of a document
|
||||
"""
|
||||
phys_devs = storage.get('physical_devices', {})
|
||||
|
||||
storage_devices = objects.HostStorageDeviceList()
|
||||
|
||||
for k, v in phys_devs.items():
|
||||
sd = objects.HostStorageDevice(name=k)
|
||||
sd.source = hd_fields.ModelSource.Designed
|
||||
|
||||
if 'labels' in v:
|
||||
sd.labels = v.get('labels').copy()
|
||||
|
||||
if 'volume_group' in v:
|
||||
vg = v.get('volume_group')
|
||||
sd.volume_group = vg
|
||||
elif 'partitions' in v:
|
||||
sd.partitions = objects.HostPartitionList()
|
||||
for vv in v.get('partitions', []):
|
||||
part_model = objects.HostPartition()
|
||||
|
||||
part_model.name = vv.get('name')
|
||||
part_model.source = hd_fields.ModelSource.Designed
|
||||
part_model.part_uuid = vv.get('part_uuid', None)
|
||||
part_model.size = vv.get('size', None)
|
||||
|
||||
if 'labels' in vv:
|
||||
part_model.labels = vv.get('labels').copy()
|
||||
|
||||
if 'volume_group' in vv:
|
||||
part_model.volume_group = vv.get('vg')
|
||||
elif 'filesystem' in vv:
|
||||
fs_info = vv.get('filesystem', {})
|
||||
part_model.mountpoint = fs_info.get('mountpoint', None)
|
||||
part_model.fstype = fs_info.get('fstype', 'ext4')
|
||||
part_model.mount_options = fs_info.get(
|
||||
'mount_options', 'defaults')
|
||||
part_model.fs_uuid = fs_info.get('fs_uuid', None)
|
||||
part_model.fs_label = fs_info.get('fs_label', None)
|
||||
|
||||
sd.partitions.append(part_model)
|
||||
storage_devices.append(sd)
|
||||
|
||||
volume_groups = objects.HostVolumeGroupList()
|
||||
vol_groups = storage.get('volume_groups', {})
|
||||
|
||||
for k, v in vol_groups.items():
|
||||
vg = objects.HostVolumeGroup(name=k)
|
||||
vg.vg_uuid = v.get('vg_uuid', None)
|
||||
vg.logical_volumes = objects.HostVolumeList()
|
||||
volume_groups.append(vg)
|
||||
for vv in v.get('logical_volumes', []):
|
||||
lv = objects.HostVolume(name=vv.get('name'))
|
||||
lv.size = vv.get('size', None)
|
||||
lv.lv_uuid = vv.get('lv_uuid', None)
|
||||
if 'filesystem' in vv:
|
||||
fs_info = vv.get('filesystem', {})
|
||||
lv.mountpoint = fs_info.get('mountpoint', None)
|
||||
lv.fstype = fs_info.get('fstype', 'ext4')
|
||||
lv.mount_options = fs_info.get('mount_options', 'defaults')
|
||||
lv.fs_uuid = fs_info.get('fs_uuid', None)
|
||||
lv.fs_label = fs_info.get('fs_label', None)
|
||||
|
||||
vg.logical_volumes.append(lv)
|
||||
|
||||
return (storage_devices, volume_groups)
|
||||
|
||||
def load_schemas(self):
|
||||
self.v1_doc_schemas = dict()
|
||||
schema_dir = self._get_schema_dir()
|
||||
|
||||
for schema_file in os.listdir(schema_dir):
|
||||
f = open(os.path.join(schema_dir, schema_file), 'r')
|
||||
for schema in yaml.safe_load_all(f):
|
||||
schema_for = schema['metadata']['name']
|
||||
if schema_for in self.v1_doc_schemas:
|
||||
self.logger.warning(
|
||||
"Duplicate document schemas found for document kind %s."
|
||||
% schema_for)
|
||||
self.logger.debug(
|
||||
"Loaded schema for document kind %s." % schema_for)
|
||||
self.v1_doc_schemas[schema_for] = schema.get('data')
|
||||
f.close()
|
||||
|
||||
def _get_schema_dir(self):
|
||||
return pkg_resources.resource_filename('drydock_provisioner',
|
||||
'schemas')
|
||||
|
||||
# Mapping of handlers for different document kinds
|
||||
v1_doc_handlers = {
|
||||
'Region': process_drydock_region,
|
||||
'Rack': process_drydock_rack,
|
||||
'NetworkLink': process_drydock_networklink,
|
||||
'Network': process_drydock_network,
|
||||
'HardwareProfile': process_drydock_hwprofile,
|
||||
'HostProfile': process_drydock_hostprofile,
|
||||
'BaremetalNode': process_drydock_node,
|
||||
'BootAction': process_drydock_bootaction,
|
||||
}
|
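The tests and fixtures later in this change load the plugin above by its dotted path and feed it a design reference; a condensed sketch of that flow, with an illustrative file path (a deckhand+http reference works the same way but resolves through Keystone):

    from drydock_provisioner.ingester.ingester import Ingester
    from drydock_provisioner.statemgmt.state import DrydockState
    import drydock_provisioner.objects as objects

    objects.register_all()

    ingester = Ingester()
    ingester.enable_plugin(
        'drydock_provisioner.ingester.plugins.deckhand.DeckhandIngester')

    # Returns a TaskStatus summarizing per-document processing and the
    # ingested design data built from drydock_provisioner.objects models.
    design_status, design_data = ingester.ingest_data(
        design_state=DrydockState(),
        design_ref='file:///tmp/deckhand_fullsite.yaml')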
@@ -12,12 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object models for BootActions."""
import requests
import base64
from jinja2 import Template
from urllib.parse import urlparse
from urllib.parse import urlunparse
import re
import ulid2

import oslo_versionedobjects.fields as ovo_fields
@@ -28,7 +24,7 @@ import drydock_provisioner.objects.fields as hd_fields
import drydock_provisioner.config as config
import drydock_provisioner.error as errors

from drydock_provisioner.util import KeystoneUtils
from drydock_provisioner.statemgmt.design.resolver import ReferenceResolver


@base.DrydockObjectRegistry.register
@@ -176,33 +172,12 @@ class BootActionAsset(base.DrydockObject):

        :param asset_url: URL to retrieve the data asset from
        """
        url_parts = urlparse(asset_url)

        if url_parts.scheme in ['http', 'https']:
            try:
                resp = requests.get(asset_url)
            except Exception as ex:
                raise errors.InvalidAssetLocation(
                    "Failed retrieving asset: %s - %s" % (type(ex).__name__,
                                                          str(ex)))
            return resp.content
        elif url_parts.scheme in [
                'promenade+http', 'promenade+https', 'deckhand+http',
                'deckhand+https'
        ]:
            try:
                ks_sess = KeystoneUtils.get_session()
                url_parts.scheme = re.sub('^[^+]+\+', '', url_parts.scheme)
                new_url = urlunparse(url_parts)
                resp = ks_sess.get(new_url)
            except Exception as ex:
                raise errors.InvalidAssetLocation(
                    "Failed retrieving asset: %s - %s" % (type(ex).__name__,
                                                          str(ex)))
            return resp.content
        else:
            try:
                return ReferenceResolver.resolve_reference(asset_url)
            except Exception as ex:
                raise errors.InvalidAssetLocation(
                    "Unknown scheme %s" % url_parts.scheme)
                    "Unable to resolve asset reference %s: %s" % (asset_url,
                                                                  str(ex)))

    def execute_pipeline(self, data, pipeline, tpl_ctx=None):
        """Execute a pipeline against a data element.

@@ -30,7 +30,7 @@ class OrchestratorAction(BaseDrydockEnum):
    PrepareNodes = 'prepare_nodes'
    DeployNodes = 'deploy_nodes'
    DestroyNodes = 'destroy_nodes'
    BootactionReports = 'bootaction_reports'
    BootactionReport = 'bootaction_report'

    # OOB driver actions
    ValidateOobServices = 'validate_oob_services'
@@ -64,7 +64,7 @@ class OrchestratorAction(BaseDrydockEnum):
    ConfigurePortProduction = 'config_port_production'

    ALL = (Noop, ValidateDesign, VerifySite, PrepareSite, VerifyNodes,
           PrepareNodes, DeployNodes, BootactionReports, DestroyNodes,
           PrepareNodes, DeployNodes, BootactionReport, DestroyNodes,
           ConfigNodePxe, SetNodeBoot, PowerOffNode, PowerOnNode,
           PowerCycleNode, InterrogateOob, CreateNetworkTemplate,
           CreateStorageTemplate, CreateBootMedia, PrepareHardwareConfig,
@@ -30,7 +30,7 @@ class NetworkLink(base.DrydockPersistentObject, base.DrydockObject):
        'site':
        ovo_fields.StringField(),
        'metalabels':
        ovo_fields.ListOfStringsField(nullable=True),
        ovo_fields.DictOfNullableStringsField(),
        'bonding_mode':
        hd_fields.NetworkLinkBondingModeField(
            default=hd_fields.NetworkLinkBondingMode.Disabled),
@@ -86,7 +86,7 @@ class Network(base.DrydockPersistentObject, base.DrydockObject):
    fields = {
        'name': ovo_fields.StringField(),
        'site': ovo_fields.StringField(),
        'metalabels': ovo_fields.ListOfStringsField(nullable=True),
        'metalabels': ovo_fields.DictOfNullableStringsField(),
        'cidr': ovo_fields.StringField(),
        'allocation_strategy': ovo_fields.StringField(),
        'vlan_id': ovo_fields.StringField(nullable=True),
@@ -727,8 +727,8 @@ class DeployNodes(BaseAction):
            design_ref=self.task.design_ref,
            action=hd_fields.OrchestratorAction.BootactionReport,
            node_filter=node_deploy_task.node_filter_from_successes())
        action = BootactionReports(node_bootaction_task, self.orchestrator,
                                   self.state_manager)
        action = BootactionReport(node_bootaction_task, self.orchestrator,
                                  self.state_manager)
        action.start()

        self.task.align_result(
@@ -738,7 +738,7 @@ class DeployNodes(BaseAction):
        return


class BootactionReports(BaseAction):
class BootactionReport(BaseAction):
    """Wait for nodes to report status of boot action."""

    def start(self):
@@ -20,6 +20,7 @@ import uuid
import ulid2
import concurrent.futures
import os
import yaml

import drydock_provisioner.config as config
import drydock_provisioner.objects as objects
@@ -278,8 +279,8 @@ class Orchestrator(object):

        if result_status is not None:
            result_status = objects.TaskStatus()
            result_status.set_status(hd_fields.ActionResult.Success)

        result_status.set_status(hd_fields.ActionResult.Success)
        return result_status

    def get_effective_site(self, design_ref):
@@ -297,7 +298,8 @@ class Orchestrator(object):
            if status.status == hd_fields.ActionResult.Success:
                self.compute_model_inheritance(site_design)
                self.compute_bootaction_targets(site_design)
                status = self._validate_design(site_design, result_status=status)
            status = self._validate_design(site_design, result_status=status)
            self.logger.debug("Status of effective design:\n%s" % yaml.dump(status.to_dict()))
        except Exception as ex:
            if status is not None:
                status.add_status_msg(
@@ -308,13 +310,6 @@ class Orchestrator(object):
                status.set_status(hd_fields.ActionResult.Failure)
                self.logger.error(
                    "Error getting site definition: %s" % str(ex), exc_info=ex)
        else:
            status.add_status_msg(
                msg="Successfully computed effective design.",
                error=False,
                ctx_type='NA',
                ctx='NA')
            status.set_status(hd_fields.ActionResult.Success)

        return status, site_design

@@ -14,51 +14,49 @@
"""Module for resolving design references."""

import urllib.parse
import re
import logging

import requests

from drydock_provisioner import error as errors
from drydock_provisioner.util import KeystoneUtils


class DesignResolver(object):
    """Class for handling different design references to resolve them to a design document."""
class ReferenceResolver(object):
    """Class for handling different data references to resolve them to data."""

    def __init__(self):
        self.scheme_handlers = {
            'http': self.resolve_reference_http,
            'file': self.resolve_reference_file,
            'https': self.resolve_reference_http,
            'deckhand+http': self.resolve_reference_deckhand,
        }

    def resolve_reference(self, design_ref):
    @classmethod
    def resolve_reference(cls, design_ref):
        """Resolve a reference to a design document.

        Locate a schema handler based on the URI scheme of the design reference
        and use that handler to get the design document referenced.
        Locate a schema handler based on the URI scheme of the data reference
        and use that handler to get the data referenced.

        :param design_ref: A URI-formatted reference to a design document
        :param design_ref: A URI-formatted reference to a data entity
        """
        try:
            design_uri = urllib.parse.urlparse(design_ref)

            handler = self.scheme_handlers.get(design_uri.scheme, None)
            handler = cls.scheme_handlers.get(design_uri.scheme, None)

            if handler is None:
                raise errors.InvalidDesignReference(
                    "Invalid reference scheme %s: no handler." %
                    design_uri.scheme)
            else:
                return handler(design_uri)
            # Have to do a little magic to call the classmethod as a pointer
            return handler.__get__(None, cls)(design_uri)
        except ValueError:
            raise errors.InvalidDesignReference(
                "Cannot resolve design reference %s: unable to parse as valid URI."
                % design_ref)

    def resolve_reference_http(self, design_uri):
    @classmethod
    def resolve_reference_http(cls, design_uri):
        """Retrieve design documents from http/https endpoints.

        Return a byte array of the design document. Support unsecured or
        Return a byte array of the response content. Support unsecured or
        basic auth

        :param design_uri: Tuple as returned by urllib.parse for the design reference
@@ -73,10 +71,11 @@ class DesignResolver(object):

        return response.content

    def resolve_reference_file(self, design_uri):
    @classmethod
    def resolve_reference_file(cls, design_uri):
        """Retrieve design documents from local file endpoints.

        Return a byte array of the design document.
        Return a byte array of the file contents

        :param design_uri: Tuple as returned by urllib.parse for the design reference
        """
@@ -85,13 +84,31 @@ class DesignResolver(object):
            doc = f.read()
        return doc

    def resolve_reference_deckhand(self, design_uri):
        """Retrieve design documents from Deckhand endpoints.
    @classmethod
    def resolve_reference_ucp(cls, design_uri):
        """Retrieve artifacts from a UCP service endpoint.

        Return a byte array of the design document. Assumes Keystone
        Return a byte array of the response content. Assumes Keystone
        authentication required.

        :param design_uri: Tuple as returned by urllib.parse for the design reference
        """
        raise errors.InvalidDesignReference(
            "Deckhand references not currently supported.")
        ks_sess = KeystoneUtils.get_session()
        (new_scheme, foo) = re.subn('^[^+]+\+', '', design_uri.scheme)
        url = urllib.parse.urlunparse((new_scheme, design_uri.netloc, design_uri.path,
                                       design_uri.params, design_uri.query, design_uri.fragment))
        logger = logging.getLogger(__name__)
        logger.debug("Calling Keystone session for url %s" % str(url))
        resp = ks_sess.get(url)
        if resp.status_code >= 400:
            raise errors.InvalidDesignReference(
                "Received error code for reference %s: %s - %s" % (url, str(resp.status_code), resp.text))
        return resp.content

    scheme_handlers = {
        'http': resolve_reference_http,
        'file': resolve_reference_file,
        'https': resolve_reference_http,
        'deckhand+http': resolve_reference_ucp,
        'promenade+http': resolve_reference_ucp,
    }
@@ -26,9 +26,9 @@ import drydock_provisioner.objects as objects
import drydock_provisioner.objects.fields as hd_fields

from .db import tables
from .design import resolver

from drydock_provisioner import config
from .design.resolver import ReferenceResolver


class DrydockState(object):
@@ -36,7 +36,6 @@ class DrydockState(object):
        self.logger = logging.getLogger(
            config.config_mgr.conf.logging.global_logger_name)

        self.resolver = resolver.DesignResolver()
        return

    def connect_db(self):
@@ -73,7 +72,7 @@ class DrydockState(object):
        conn.close()

    def get_design_documents(self, design_ref):
        return self.resolver.resolve_reference(design_ref)
        return ReferenceResolver.resolve_reference(design_ref)

    def get_tasks(self):
        """Get all tasks in the database."""
@@ -27,19 +27,30 @@ import pytest


@pytest.fixture()
def test_ingester():
def deckhand_ingester():
    ingester = Ingester()
    ingester.enable_plugin(
        'drydock_provisioner.ingester.plugins.deckhand.DeckhandIngester')
    return ingester

@pytest.fixture()
def yaml_ingester():
    ingester = Ingester()
    ingester.enable_plugin(
        'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
    return ingester


@pytest.fixture()
def test_orchestrator(drydock_state, test_ingester):
def deckhand_orchestrator(drydock_state, deckhand_ingester):
    orchestrator = Orchestrator(
        state_manager=drydock_state, ingester=test_ingester)
        state_manager=drydock_state, ingester=deckhand_ingester)
    return orchestrator

@pytest.fixture()
def yaml_orchestrator(drydock_state, yaml_ingester):
    orchestrator = Orchestrator(
        state_manager=drydock_state, ingester=yaml_ingester)
    return orchestrator

@pytest.fixture()
def blank_state(drydock_state):
@ -73,7 +73,8 @@ class TestClass(object):
|
||||
test_task = test_orchestrator.create_task(
|
||||
action=hd_fields.OrchestratorAction.Noop, design_ref=design_ref)
|
||||
|
||||
id_key = test_orchestrator.create_bootaction_context('compute01', test_task)
|
||||
id_key = test_orchestrator.create_bootaction_context(
|
||||
'compute01', test_task)
|
||||
|
||||
ba_ctx = dict(
|
||||
nodename='compute01',
|
||||
|
@ -104,7 +104,8 @@ class TestClass(object):
|
||||
id_key = os.urandom(32)
|
||||
action_id = ulid2.generate_binary_ulid()
|
||||
blank_state.post_boot_action('compute01',
|
||||
test_task.get_id(), id_key, action_id, 'helloworld')
|
||||
test_task.get_id(), id_key, action_id,
|
||||
'helloworld')
|
||||
|
||||
ba = dict(
|
||||
nodename='compute01',
|
||||
|
@ -28,8 +28,7 @@ class TestPostgres(object):
|
||||
nodename = 'testnode'
|
||||
result = drydock_state.post_boot_action(nodename,
|
||||
populateddb.get_id(), id_key,
|
||||
action_id,
|
||||
'helloworld')
|
||||
action_id, 'helloworld')
|
||||
|
||||
assert result
|
||||
|
||||
@ -39,7 +38,8 @@ class TestPostgres(object):
|
||||
action_id = ulid2.generate_binary_ulid()
|
||||
nodename = 'testnode'
|
||||
drydock_state.post_boot_action(nodename,
|
||||
populateddb.get_id(), id_key, action_id, 'helloworld')
|
||||
populateddb.get_id(), id_key, action_id,
|
||||
'helloworld')
|
||||
|
||||
result = drydock_state.put_bootaction_status(
|
||||
ulid2.encode_ulid_base32(action_id),
|
||||
@ -53,7 +53,8 @@ class TestPostgres(object):
|
||||
action_id = ulid2.generate_binary_ulid()
|
||||
nodename = 'testnode'
|
||||
drydock_state.post_boot_action(nodename,
|
||||
populateddb.get_id(), id_key, action_id, 'helloworld')
|
||||
populateddb.get_id(), id_key, action_id,
|
||||
'helloworld')
|
||||
|
||||
ba = drydock_state.get_boot_action(ulid2.encode_ulid_base32(action_id))
|
||||
|
||||
|
@ -15,24 +15,20 @@
|
||||
|
||||
import ulid2
|
||||
|
||||
from drydock_provisioner.ingester.ingester import Ingester
|
||||
from drydock_provisioner.statemgmt.state import DrydockState
|
||||
import drydock_provisioner.objects as objects
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
def test_bootaction_render(self, input_files, setup):
|
||||
def test_bootaction_render(self, input_files, deckhand_ingester, setup):
|
||||
objects.register_all()
|
||||
|
||||
input_file = input_files.join("fullsite.yaml")
|
||||
input_file = input_files.join("deckhand_fullsite.yaml")
|
||||
|
||||
design_state = DrydockState()
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugin(
|
||||
'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
|
||||
design_status, design_data = ingester.ingest_data(
|
||||
design_status, design_data = deckhand_ingester.ingest_data(
|
||||
design_state=design_state, design_ref=design_ref)
|
||||
|
||||
ba = design_data.get_bootaction('helloworld')
|
||||
|
@ -17,13 +17,13 @@ import drydock_provisioner.objects as objects
|
||||
|
||||
class TestClass(object):
|
||||
def test_bootaction_scoping_blankfilter(self, input_files,
|
||||
test_orchestrator):
|
||||
deckhand_orchestrator):
|
||||
"""Test a boot action with no node filter scopes correctly."""
|
||||
input_file = input_files.join("fullsite.yaml")
|
||||
input_file = input_files.join("deckhand_fullsite.yaml")
|
||||
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
design_status, design_data = test_orchestrator.get_effective_site(
|
||||
design_status, design_data = deckhand_orchestrator.get_effective_site(
|
||||
design_ref)
|
||||
|
||||
assert design_status.status == objects.fields.ActionResult.Success
|
||||
@ -36,13 +36,13 @@ class TestClass(object):
|
||||
assert 'controller01' in ba.target_nodes
|
||||
|
||||
def test_bootaction_scoping_unionfilter(self, input_files,
|
||||
test_orchestrator):
|
||||
deckhand_orchestrator):
|
||||
"""Test a boot action with a union node filter scopes correctly."""
|
||||
input_file = input_files.join("fullsite.yaml")
|
||||
input_file = input_files.join("deckhand_fullsite.yaml")
|
||||
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
design_status, design_data = test_orchestrator.get_effective_site(
|
||||
design_status, design_data = deckhand_orchestrator.get_effective_site(
|
||||
design_ref)
|
||||
|
||||
assert design_status.status == objects.fields.ActionResult.Success
|
||||
|
@ -18,24 +18,20 @@ import tarfile
|
||||
import io
|
||||
|
||||
import drydock_provisioner.objects as objects
|
||||
from drydock_provisioner.ingester.ingester import Ingester
|
||||
from drydock_provisioner.statemgmt.state import DrydockState
|
||||
from drydock_provisioner.control.bootaction import BootactionUtils
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
def test_bootaction_tarbuilder(self, input_files, setup):
|
||||
def test_bootaction_tarbuilder(self, input_files, deckhand_ingester, setup):
|
||||
objects.register_all()
|
||||
|
||||
input_file = input_files.join("fullsite.yaml")
|
||||
input_file = input_files.join("deckhand_fullsite.yaml")
|
||||
|
||||
design_state = DrydockState()
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugin(
|
||||
'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
|
||||
design_status, design_data = ingester.ingest_data(
|
||||
design_status, design_data = deckhand_ingester.ingest_data(
|
||||
design_state=design_state, design_ref=design_ref)
|
||||
|
||||
target_host = 'compute01'
|
||||
|
@ -13,13 +13,28 @@
|
||||
# limitations under the License.
|
||||
"""Test YAML data ingestion."""
|
||||
|
||||
from drydock_provisioner.ingester.ingester import Ingester
|
||||
from drydock_provisioner.statemgmt.state import DrydockState
|
||||
import drydock_provisioner.objects as objects
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
def test_ingest_full_site(self, input_files, setup):
|
||||
def test_ingest_deckhand(self, input_files, setup, deckhand_ingester):
|
||||
objects.register_all()
|
||||
|
||||
input_file = input_files.join("deckhand_fullsite.yaml")
|
||||
|
||||
design_state = DrydockState()
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
design_status, design_data = deckhand_ingester.ingest_data(
|
||||
design_state=design_state, design_ref=design_ref)
|
||||
|
||||
print("%s" % str(design_status.to_dict()))
|
||||
assert design_status.status == objects.fields.ActionResult.Success
|
||||
assert len(design_data.host_profiles) == 2
|
||||
assert len(design_data.baremetal_nodes) == 2
|
||||
|
||||
def test_ingest_yaml(self, input_files, setup, yaml_ingester):
|
||||
objects.register_all()
|
||||
|
||||
input_file = input_files.join("fullsite.yaml")
|
||||
@ -27,11 +42,10 @@ class TestClass(object):
|
||||
design_state = DrydockState()
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugin(
|
||||
'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
|
||||
design_status, design_data = ingester.ingest_data(
|
||||
design_status, design_data = yaml_ingester.ingest_data(
|
||||
design_state=design_state, design_ref=design_ref)
|
||||
|
||||
print("%s" % str(design_status.to_dict()))
|
||||
assert design_status.status == objects.fields.ActionResult.Success
|
||||
assert len(design_data.host_profiles) == 2
|
||||
assert len(design_data.baremetal_nodes) == 2
|
||||
|
@ -13,13 +13,12 @@
|
||||
# limitations under the License.
|
||||
"""Test that boot action models are properly parsed."""
|
||||
|
||||
from drydock_provisioner.ingester.ingester import Ingester
|
||||
from drydock_provisioner.statemgmt.state import DrydockState
|
||||
import drydock_provisioner.objects as objects
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
def test_bootaction_parse(self, input_files, setup):
|
||||
def test_bootaction_parse(self, input_files, deckhand_ingester, setup):
|
||||
objects.register_all()
|
||||
|
||||
input_file = input_files.join("bootaction.yaml")
|
||||
@ -27,10 +26,7 @@ class TestClass(object):
|
||||
design_state = DrydockState()
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugin(
|
||||
'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
|
||||
design_status, design_data = ingester.ingest_data(
|
||||
design_status, design_data = deckhand_ingester.ingest_data(
|
||||
design_state=design_state, design_ref=design_ref)
|
||||
|
||||
ba = design_data.get_bootaction('helloworld')
|
||||
|
tests/unit/test_ingester_invalidation.py (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Test that boot action models are properly parsed."""
|
||||
|
||||
from drydock_provisioner.statemgmt.state import DrydockState
|
||||
import drydock_provisioner.objects as objects
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
def test_bootaction_parse(self, input_files, deckhand_ingester, setup):
|
||||
objects.register_all()
|
||||
|
||||
input_file = input_files.join("invalid_bootaction.yaml")
|
||||
|
||||
design_state = DrydockState()
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
design_status, design_data = deckhand_ingester.ingest_data(
|
||||
design_state=design_state, design_ref=design_ref)
|
||||
|
||||
assert design_status.status == objects.fields.ActionResult.Failure
|
||||
|
||||
print(str(design_status.to_dict()))
|
||||
error_msgs = [m for m in design_status.message_list if m.error]
|
||||
assert len(error_msgs) == 2
|
@ -13,7 +13,6 @@
|
||||
# limitations under the License.
|
||||
"""Test that rack models are properly parsed."""
|
||||
|
||||
from drydock_provisioner.ingester.ingester import Ingester
|
||||
from drydock_provisioner.statemgmt.state import DrydockState
|
||||
import drydock_provisioner.objects as objects
|
||||
import drydock_provisioner.error as errors
|
||||
@ -22,36 +21,32 @@ import pytest
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
def test_rack_parse(self, input_files, setup):
|
||||
def test_rack_parse(self, deckhand_ingester, input_files, setup):
|
||||
objects.register_all()
|
||||
|
||||
input_file = input_files.join("fullsite.yaml")
|
||||
input_file = input_files.join("deckhand_fullsite.yaml")
|
||||
|
||||
design_state = DrydockState()
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugin(
|
||||
'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
|
||||
design_status, design_data = ingester.ingest_data(
|
||||
design_status, design_data = deckhand_ingester.ingest_data(
|
||||
design_state=design_state, design_ref=design_ref)
|
||||
|
||||
print("%s" % str(design_status.to_dict()))
|
||||
assert design_status.status == objects.fields.ActionResult.Success
|
||||
rack = design_data.get_rack('rack1')
|
||||
|
||||
assert rack.location.get('grid') == 'EG12'
|
||||
|
||||
def test_rack_not_found(self, input_files, setup):
|
||||
def test_rack_not_found(self, deckhand_ingester, input_files, setup):
|
||||
objects.register_all()
|
||||
|
||||
input_file = input_files.join("fullsite.yaml")
|
||||
input_file = input_files.join("deckhand_fullsite.yaml")
|
||||
|
||||
design_state = DrydockState()
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugin(
|
||||
'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
|
||||
design_status, design_data = ingester.ingest_data(
|
||||
design_status, design_data = deckhand_ingester.ingest_data(
|
||||
design_state=design_state, design_ref=design_ref)
|
||||
|
||||
with pytest.raises(errors.DesignError):
|
||||
|
@ -1,44 +0,0 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Test that YAML ingestion works."""
|
||||
|
||||
from drydock_provisioner.ingester.plugins.yaml import YamlIngester
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
def test_ingest_singledoc(self, input_files):
|
||||
input_file = input_files.join("singledoc.yaml")
|
||||
|
||||
ingester = YamlIngester()
|
||||
|
||||
f = open(str(input_file), 'rb')
|
||||
yaml_string = f.read()
|
||||
|
||||
status, models = ingester.ingest_data(content=yaml_string)
|
||||
|
||||
assert status.status == 'success'
|
||||
assert len(models) == 1
|
||||
|
||||
def test_ingest_multidoc(self, input_files):
|
||||
input_file = input_files.join("multidoc.yaml")
|
||||
|
||||
ingester = YamlIngester()
|
||||
|
||||
f = open(str(input_file), 'rb')
|
||||
yaml_string = f.read()
|
||||
|
||||
status, models = ingester.ingest_data(content=yaml_string)
|
||||
|
||||
assert status.status == 'success'
|
||||
assert len(models) == 3
|
@ -13,22 +13,18 @@
|
||||
# limitations under the License.
|
||||
"""Test the node filter logic in the orchestrator."""
|
||||
|
||||
from drydock_provisioner.ingester.ingester import Ingester
|
||||
from drydock_provisioner.statemgmt.state import DrydockState
|
||||
import drydock_provisioner.objects as objects
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
def test_node_filter_obj(self, input_files, setup, test_orchestrator):
|
||||
input_file = input_files.join("fullsite.yaml")
|
||||
def test_node_filter_obj(self, input_files, setup, deckhand_orchestrator, deckhand_ingester):
|
||||
input_file = input_files.join("deckhand_fullsite.yaml")
|
||||
|
||||
design_state = DrydockState()
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugin(
|
||||
'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
|
||||
design_status, design_data = ingester.ingest_data(
|
||||
design_status, design_data = deckhand_ingester.ingest_data(
|
||||
design_state=design_state, design_ref=design_ref)
|
||||
|
||||
nf = objects.NodeFilter()
|
||||
@ -37,20 +33,17 @@ class TestClass(object):
|
||||
nfs = objects.NodeFilterSet(
|
||||
filter_set_type='intersection', filter_set=[nf])
|
||||
|
||||
node_list = test_orchestrator.process_node_filter(nfs, design_data)
|
||||
node_list = deckhand_orchestrator.process_node_filter(nfs, design_data)
|
||||
|
||||
assert len(node_list) == 1
|
||||
|
||||
def test_node_filter_dict(self, input_files, setup, test_orchestrator):
|
||||
input_file = input_files.join("fullsite.yaml")
|
||||
def test_node_filter_dict(self, input_files, setup, deckhand_orchestrator, deckhand_ingester):
|
||||
input_file = input_files.join("deckhand_fullsite.yaml")
|
||||
|
||||
design_state = DrydockState()
|
||||
design_ref = "file://%s" % str(input_file)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugin(
|
||||
'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
|
||||
design_status, design_data = ingester.ingest_data(
|
||||
design_status, design_data = deckhand_ingester.ingest_data(
|
||||
design_state=design_state, design_ref=design_ref)
|
||||
|
||||
nfs = {
|
||||
@ -64,6 +57,6 @@ class TestClass(object):
|
||||
],
|
||||
}
|
||||
|
||||
node_list = test_orchestrator.process_node_filter(nfs, design_data)
|
||||
node_list = deckhand_orchestrator.process_node_filter(nfs, design_data)
|
||||
|
||||
assert len(node_list) == 1
|
||||
|
tests/unit/test_reference_resolver.py (new file, 57 lines)
@@ -0,0 +1,57 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the data reference resolver."""

import base64

import responses

from drydock_provisioner.statemgmt.design.resolver import ReferenceResolver


class TestClass(object):
    def test_resolve_file_url(self, input_files):
        """Test that the resolver will resolve file URLs."""
        input_file = input_files.join("fullsite.yaml")
        url = 'file://%s' % str(input_file)

        content = ReferenceResolver.resolve_reference(url)

        assert len(content) > 0

    @responses.activate
    def test_resolve_http_url(self):
        """Test that the resolver will resolve http URLs."""
        url = 'http://foo.com/test.yaml'
        responses.add(responses.GET, url)

        ReferenceResolver.resolve_reference(url)

        assert len(responses.calls) == 1
        assert responses.calls[0].request.url == url

    @responses.activate
    def test_resolve_http_basicauth_url(self):
        """Test the resolver will resolve http URLs w/ basic auth."""
        url = 'http://user:pass@foo.com/test.yaml'
        auth_header = "Basic %s" % base64.b64encode(
            "user:pass".encode('utf-8')).decode('utf-8')
        responses.add(responses.GET, url)

        ReferenceResolver.resolve_reference(url)

        assert len(responses.calls) == 1
        assert 'Authorization' in responses.calls[0].request.headers
        assert responses.calls[0].request.headers.get(
            'Authorization') == auth_header
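The three tests above pin down the resolver's observable behavior: file:// URLs are read from disk, http:// URLs are fetched once, and userinfo embedded in the URL becomes a Basic Authorization header. A rough sketch of that behavior, assuming a requests-based fetch and an illustrative function name (this is not the actual ReferenceResolver code):

# Illustrative sketch of the behavior exercised by the tests above; not the
# actual ReferenceResolver implementation.
import base64
from urllib.parse import urlparse

import requests


def resolve_reference_sketch(url):
    """Return the bytes referenced by a file:// or http(s):// URL."""
    parts = urlparse(url)
    if parts.scheme == 'file':
        with open(parts.path, 'rb') as f:
            return f.read()
    if parts.scheme in ('http', 'https'):
        headers = {}
        if parts.username:
            token = base64.b64encode(
                ("%s:%s" % (parts.username, parts.password or '')).encode('utf-8'))
            headers['Authorization'] = 'Basic %s' % token.decode('utf-8')
        # Strip the userinfo before issuing the request.
        netloc = parts.hostname + (':%d' % parts.port if parts.port else '')
        clean_url = parts._replace(netloc=netloc).geturl()
        return requests.get(clean_url, headers=headers).content
    raise ValueError("Unsupported scheme %s" % parts.scheme)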
@ -1,27 +1,31 @@
---
apiVersion: 'drydock/v1'
kind: BootAction
schema: 'drydock/BootAction/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: helloworld
  region: sitename
  date: 17-FEB-2017
  author: Scott Hussey
spec:
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  assets:
    - path: /var/tmp/hello.sh
      type: file
      permissions: '555'
      data: |-
        IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkIScK
        IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19
        Jwo=
      data_pipeline:
        - base64_decode
        - utf8_encode
        - utf8_decode
        - template
    - path: /lib/systemd/system/hello.service
      type: unit
      permissions: '600'
      data: |-
        W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4
        ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu
        dGFyZ2V0Cg==
      data_pipeline:
        - base64_decode
        - utf8_decode
...
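The updated data_pipeline above chains base64_decode, utf8_decode and template before the asset is written out. A small illustrative runner for such a pipeline is sketched below; the stage handling (especially the template step) is a guess for demonstration, not Drydock's asset renderer.

# Illustrative pipeline runner; the stage names mirror the YAML above but the
# implementation is a stand-in, not Drydock's asset-rendering code.
import base64


def run_data_pipeline(data, pipeline, context=None):
    for stage in pipeline:
        if stage == 'base64_decode':
            data = base64.b64decode(data)
        elif stage == 'utf8_decode':
            data = data.decode('utf-8')
        elif stage == 'utf8_encode':
            data = data.encode('utf-8')
        elif stage == 'template':
            # The real pipeline renders {{ node.* }} markers via a template
            # engine; a plain string substitution stands in here.
            for key, value in (context or {}).items():
                data = data.replace('{{ %s }}' % key, value)
    return data


script = run_data_pipeline(
    'IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19Jwo=',
    ['base64_decode', 'utf8_decode', 'template'],
    context={'node.hostname': 'compute01'})
# script now holds the hello.sh shell script with the hostname substituted.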
448
tests/yaml_samples/deckhand_fullsite.yaml
Normal file
@ -0,0 +1,448 @@
#Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################
#
# bootstrap_seed.yaml - Site server design definition for physical layer
#
####################
# version the schema in this file so consumers can rationally parse it
---
schema: 'drydock/Region/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: 'sitename'
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  tag_definitions:
    - tag: 'test'
      definition_type: 'lshw_xpath'
      definition: "//node[@id=\"display\"]/'clock units=\"Hz\"' > 1000000000"
  authorized_keys:
    - |
      ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDENeyO5hLPbLLQRZ0oafTYWs1ieo5Q+XgyZQs51Ju
      jDGc8lKlWsg1/6yei2JewKMgcwG2Buu1eqU92Xn1SvMZLyt9GZURuBkyjcfVc/8GiU5QP1Of8B7CV0c
      kfUpHWYJ17olTzT61Hgz10ioicBF6cjgQrLNcyn05xoaJHD2Vpf8Unxzi0YzA2e77yRqBo9jJVRaX2q
      wUJuZrzb62x3zw8Knz6GGSZBn8xRKLaw1SKFpd1hwvL62GfqX5ZBAT1AYTZP1j8GcAoK8AFVn193SEU
      vjSdUFa+RNWuJhkjBRfylJczIjTIFb5ls0jpbA3bMA9DE7lFKVQl6vVwFmiIVBI1 samplekey
---
schema: 'drydock/NetworkLink/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: oob
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  bonding:
    mode: disabled
  mtu: 1500
  linkspeed: 100full
  trunking:
    mode: disabled
    default_network: oob
  allowed_networks:
    - oob
---
schema: 'drydock/NetworkLink/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: pxe
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  bonding:
    mode: disabled
  mtu: 1500
  linkspeed: auto
  trunking:
    mode: disabled
    default_network: pxe
  allowed_networks:
    - pxe
---
schema: 'drydock/NetworkLink/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: gp
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  bonding:
    mode: 802.3ad
    hash: layer3+4
    peer_rate: slow
  mtu: 9000
  linkspeed: auto
  trunking:
    mode: 802.1q
    default_network: mgmt
  allowed_networks:
    - public
    - mgmt
---
schema: 'drydock/Rack/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: rack1
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  tor_switches:
    switch01name:
      mgmt_ip: 1.1.1.1
      sdn_api_uri: polo+https://polo-api.web.att.com/switchmgmt?switch=switch01name
    switch02name:
      mgmt_ip: 1.1.1.2
      sdn_api_uri: polo+https://polo-api.web.att.com/switchmgmt?switch=switch02name
  location:
    clli: HSTNTXMOCG0
    grid: EG12
  local_networks:
    - pxe-rack1
---
schema: 'drydock/Network/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: oob
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  cidr: 172.16.100.0/24
  ranges:
    - type: static
      start: 172.16.100.15
      end: 172.16.100.254
  dns:
    domain: ilo.sitename.att.com
    servers: 172.16.100.10
---
schema: 'drydock/Network/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: pxe
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  dhcp_relay:
    self_ip: 172.16.0.4
    upstream_target: 172.16.5.5
  mtu: 1500
  cidr: 172.16.0.0/24
  ranges:
    - type: dhcp
      start: 172.16.0.5
      end: 172.16.0.254
  dns:
    domain: admin.sitename.att.com
    servers: 172.16.0.10
---
schema: 'drydock/Network/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: mgmt
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  vlan: '100'
  mtu: 1500
  cidr: 172.16.1.0/24
  ranges:
    - type: static
      start: 172.16.1.15
      end: 172.16.1.254
  routes:
    - subnet: 0.0.0.0/0
      gateway: 172.16.1.1
      metric: 10
  dns:
    domain: mgmt.sitename.example.com
    servers: 172.16.1.9,172.16.1.10
---
schema: 'drydock/Network/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: private
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  vlan: '101'
  mtu: 9000
  cidr: 172.16.2.0/24
  ranges:
    - type: static
      start: 172.16.2.15
      end: 172.16.2.254
  dns:
    domain: priv.sitename.example.com
    servers: 172.16.2.9,172.16.2.10
---
schema: 'drydock/Network/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: public
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  vlan: '102'
  mtu: 1500
  cidr: 172.16.3.0/24
  ranges:
    - type: static
      start: 172.16.3.15
      end: 172.16.3.254
  routes:
    - subnet: 0.0.0.0/0
      gateway: 172.16.3.1
      metric: 10
  dns:
    domain: sitename.example.com
    servers: 8.8.8.8
---
schema: 'drydock/HostProfile/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: defaults
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  oob:
    type: ipmi
    network: oob
    account: admin
    credential: admin
  storage:
    physical_devices:
      sda:
        labels:
          role: rootdisk
        partitions:
          - name: root
            size: 20g
            bootable: true
            filesystem:
              mountpoint: '/'
              fstype: 'ext4'
              mount_options: 'defaults'
          - name: boot
            size: 1g
            bootable: false
            filesystem:
              mountpoint: '/boot'
              fstype: 'ext4'
              mount_options: 'defaults'
      sdb:
        volume_group: 'log_vg'
    volume_groups:
      log_vg:
        logical_volumes:
          - name: 'log_lv'
            size: '500m'
            filesystem:
              mountpoint: '/var/log'
              fstype: 'xfs'
              mount_options: 'defaults'
  platform:
    image: ubuntu_16.04
    kernel: generic
    kernel_params:
      quiet: true
      console: ttyS2
  metadata:
    owner_data:
      foo: bar
---
schema: 'drydock/HostProfile/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: 'k8-node'
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  host_profile: defaults
  hardware_profile: HPGen9v3
  primary_network: mgmt
  interfaces:
    pxe:
      device_link: pxe
      labels:
        noconfig: true
      slaves:
        - prim_nic01
      networks:
        - pxe
    bond0:
      device_link: gp
      slaves:
        - prim_nic01
        - prim_nic02
      networks:
        - mgmt
        - private
  metadata:
    tags:
      - 'test'
---
schema: 'drydock/BaremetalNode/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: controller01
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  host_profile: k8-node
  interfaces:
    bond0:
      networks:
        - '!private'
  addressing:
    - network: pxe
      address: dhcp
    - network: mgmt
      address: 172.16.1.20
    - network: public
      address: 172.16.3.20
    - network: oob
      address: 172.16.100.20
  metadata:
    rack: rack1
---
schema: 'drydock/BaremetalNode/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: compute01
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  host_profile: k8-node
  addressing:
    - network: pxe
      address: dhcp
    - network: mgmt
      address: 172.16.1.21
    - network: private
      address: 172.16.2.21
    - network: oob
      address: 172.16.100.21
  metadata:
    rack: rack2
---
schema: 'drydock/HardwareProfile/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: HPGen9v3
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  vendor: HP
  generation: '8'
  hw_version: '3'
  bios_version: '2.2.3'
  boot_mode: bios
  bootstrap_protocol: pxe
  pxe_interface: 0
  device_aliases:
    prim_nic01:
      address: '0000:00:03.0'
      dev_type: '82540EM Gigabit Ethernet Controller'
      bus_type: 'pci'
    prim_nic02:
      address: '0000:00:04.0'
      dev_type: '82540EM Gigabit Ethernet Controller'
      bus_type: 'pci'
    primary_boot:
      address: '2:0.0.0'
      dev_type: 'VBOX HARDDISK'
      bus_type: 'scsi'
---
schema: 'drydock/BootAction/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: helloworld
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  assets:
    - path: /var/tmp/hello.sh
      type: file
      permissions: '555'
      data: |-
        IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19
        Jwo=
      data_pipeline:
        - base64_decode
        - utf8_decode
        - template
    - path: /lib/systemd/system/hello.service
      type: unit
      permissions: '600'
      data: |-
        W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4
        ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu
        dGFyZ2V0Cg==
      data_pipeline:
        - base64_decode
        - utf8_decode
---
schema: 'drydock/BootAction/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: hw_filtered
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  node_filter:
    filter_set_type: 'union'
    filter_set:
      - filter_type: 'union'
        node_names:
          - 'compute01'
  assets:
    - path: /var/tmp/hello.sh
      type: file
      permissions: '555'
      data: |-
        IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19
        Jwo=
      data_pipeline:
        - base64_decode
        - utf8_decode
        - template
    - path: /lib/systemd/system/hello.service
      type: unit
      permissions: '600'
      data: |-
        W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4
        ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu
        dGFyZ2V0Cg==
      data_pipeline:
        - base64_decode
        - utf8_decode
...
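The sample above is a plain multi-document YAML stream in which every document carries a top-level schema plus metadata/data sections, which is what lets a Deckhand-style ingester route each document by schema. A short sketch of splitting such a bundle with yaml.safe_load_all follows; the grouping helper is illustrative, not the DeckhandIngester's actual dispatch.

# Illustrative only: split a rendered document bundle and group it by schema.
# The real DeckhandIngester maps each schema to a specific object parser.
from collections import defaultdict

import yaml


def group_documents_by_schema(document_blob):
    """Return {schema: [data dicts]} for a '---'-separated YAML stream."""
    grouped = defaultdict(list)
    for doc in yaml.safe_load_all(document_blob):
        if not doc:
            continue
        schema = doc.get('schema', 'unknown')
        # Only drydock/* documents are of interest to the ingester.
        if schema.startswith('drydock/'):
            grouped[schema].append(doc.get('data', {}))
    return grouped


with open('tests/yaml_samples/deckhand_fullsite.yaml', 'rb') as f:
    parts = group_documents_by_schema(f.read())
# parts['drydock/BaremetalNode/v1'] would hold controller01 and compute01.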
@ -1,18 +1,31 @@
---
schema: 'drydock/BootAction/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: helloworld
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  assets:
    - path: /var/tmp/hello.sh
      type: file
      permissions: 555
      data: |
        IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkIScK
      permissions: '555'
      data: |-
        IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19
        Jwo=
      data_pipeline:
        - foo
    - path: hello.service
        - utf8_decode
        - template
    - path: /lib/systemd/system/hello.service
      type: unit
      data: |
      permissions: 600
      data: |-
        W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4
        ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu
        dGFyZ2V0Cg==
      data_pipeline:
        - base64_decode

        - utf8_decode
...