# spyglass/spyglass/parser/engine.py
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
import os
import pprint
import sys
import jsonschema
from netaddr import IPNetwork
from pkg_resources import resource_filename
import yaml
LOG = logging.getLogger(__name__)
class ProcessDataSource(object):
    """Process plugin-extracted site data into an intermediary document.

    Data loaded via :meth:`load_extracted_data_from_data_source` has the
    design rules from ``spyglass/config/rules.yaml`` applied to it and is
    then rendered as the intermediary YAML consumed by downstream
    manifest generation.
    """

    def __init__(self, site_type):
        """Set up empty intermediary state and record the site type.

        :param site_type: region identifier; stored as ``region_name``.
        """
        # Initialize intermediary and save site type
        self._initialize_intermediary()
        self.region_name = site_type

    @staticmethod
    def _read_file(file_name):
        """Return the full contents of ``file_name`` as a string."""
        with open(file_name, "r") as f:
            raw_data = f.read()
        return raw_data

    def _initialize_intermediary(self):
        """Reset all intermediary attributes to their empty defaults."""
        # TODO(ian-pittwood): Define these in init, remove this function
        self.host_type = {}
        self.data = {
            "network": {},
            "baremetal": {},
            "region_name": "",
            "storage": {},
            "site_info": {},
        }
        self.sitetype = None
        self.genesis_node = None
        self.region_name = None
        self.network_subnets = None

    def _get_network_subnets(self):
        """Extract subnet information for networks.

        In some networks there are multiple subnets; in that case only
        the first subnet is assigned.

        :returns: dict mapping network name to its ``IPNetwork``.
        """
        LOG.info("Extracting network subnets")
        network_subnets = {}
        for net_type in self.data.network.vlan_network_data:
            # The 'ingress' network is handled separately (bgp rules).
            if net_type.name != "ingress":
                network_subnets[net_type.name] = IPNetwork(net_type.subnet[0])
        LOG.debug(
            "Network subnets:\n{}".format(pprint.pformat(network_subnets)))
        return network_subnets

    def _get_genesis_node_details(self):
        """Cache the host whose type is 'genesis' on ``self.genesis_node``."""
        # TODO(ian-pittwood): Use get_baremetal_host_by_type instead
        for rack in self.data.baremetal:
            for host in rack.hosts:
                if host.type == "genesis":
                    self.genesis_node = host
        LOG.debug(
            "Genesis Node Details:\n{}".format(
                pprint.pformat(self.genesis_node)))

    @staticmethod
    def _validate_intermediary_data(data):
        """Validates the intermediary data before generating manifests.

        It checks whether the data types and data format are as expected.
        The method validates this with regex pattern defined for each
        data type.

        Exits the process with a non-zero status on validation failure.
        """
        # TODO(ian-pittwood): Implement intermediary validation or remove
        # NOTE(review): this mixes attribute access (temp_data.baremetal)
        # with subscripting (temp_data["baremetal"]) on the same object;
        # confirm the data model supports both before re-enabling the
        # (currently commented-out) call to this method.
        LOG.info("Validating Intermediary data")
        # Performing a deep copy
        temp_data = copy.deepcopy(data)
        # Converting baremetal dict to list.
        baremetal_list = []
        for rack in temp_data.baremetal:
            temp = [{k: v} for k, v in temp_data["baremetal"][rack].items()]
            baremetal_list = baremetal_list + temp
        temp_data["baremetal"] = baremetal_list
        schema_dir = resource_filename("spyglass", "schemas/")
        schema_file = os.path.join(schema_dir, "data_schema.json")
        json_data = json.loads(json.dumps(temp_data))
        with open(schema_file, "r") as f:
            json_schema = json.load(f)
        try:
            # Suppressing writing of data2.json. Can use it for debugging
            # with open('data2.json', 'w') as outfile:
            #     json.dump(temp_data, outfile, sort_keys=True, indent=4)
            jsonschema.validate(json_data, json_schema)
        except jsonschema.exceptions.ValidationError as e:
            LOG.error("Validation Error")
            LOG.error("Message:{}".format(e.message))
            LOG.error("Validator_path:{}".format(e.path))
            LOG.error("Validator_pattern:{}".format(e.validator_value))
            LOG.error("Validator:{}".format(e.validator))
            # Exit non-zero so callers can detect the failure; a bare
            # sys.exit() would report success (status 0).
            sys.exit(1)
        except jsonschema.exceptions.SchemaError as e:
            LOG.error("Schema Validation Error!!")
            LOG.error("Message:{}".format(e.message))
            LOG.error("Schema:{}".format(e.schema))
            LOG.error("Validator_value:{}".format(e.validator_value))
            LOG.error("Validator:{}".format(e.validator))
            LOG.error("path:{}".format(e.path))
            sys.exit(1)
        LOG.info("Data validation Passed!")

    def _apply_design_rules(self):
        """Applies design rules from rules.yaml

        These rules are used to determine ip address allocation ranges,
        host profile interfaces and also to create hardware profile
        information. The method calls corresponding rule handler function
        based on rule name and applies them to appropriate data objects.
        """
        LOG.info("Apply design rules")
        # TODO(ian-pittwood): let users specify the rules file via cli
        # opts and provide better guidelines on how to write these rules
        # and how they are applied.
        rules_dir = resource_filename("spyglass", "config/")
        rules_file = os.path.join(rules_dir, "rules.yaml")
        rules_data = yaml.safe_load(self._read_file(rules_file))
        for rule in rules_data:
            rule_name = rules_data[rule]["name"]
            # Log before dispatch so a failing handler is attributable.
            LOG.info("Applying rule:{}".format(rule_name))
            # Handlers follow the naming convention _apply_rule_<name>.
            handler = getattr(self, "_apply_rule_" + rule_name)
            handler(rules_data[rule][rule_name])

    def _apply_rule_host_profile_interfaces(self, rule_data):
        """Placeholder for the host profile interfaces rule.

        Nothing to do as of now since host profile information is already
        present in plugin data. This handler shall be implemented if a
        plugin data source doesn't provide host profile information.
        """
        # TODO(ian-pittwood): Should be implemented as it is outside of
        # our plugin packages. Logic can be implemented to ensure proper
        # data processing.
        pass

    def _apply_rule_hardware_profile(self, rule_data):
        """Apply rules to define host type from hardware profile info.

        Host profile will define host types as "controller, compute or
        genesis". The rule_data has pre-defined information to define
        compute or controller based on host_profile. For defining
        'genesis' the first controller host is defined as genesis.
        """
        is_genesis = False
        hardware_profile = rule_data[self.data.site_info.sitetype]
        # Racks and hosts are sorted so that the first controller of the
        # first rack is deterministically assigned as the 'genesis' node.
        for rack in sorted(self.data.baremetal, key=lambda x: x.name):
            for host in sorted(rack.hosts, key=lambda x: x.name):
                if host.host_profile == \
                        hardware_profile["profile_name"]["ctrl"]:
                    if not is_genesis:
                        host.type = "genesis"
                        is_genesis = True
                    else:
                        host.type = "controller"
                else:
                    host.type = "compute"

    def _apply_rule_ip_alloc_offset(self, rule_data):
        """Apply offset rules to update baremetal host ips and vlan nets."""
        # Subnets must be computed before either update below.
        self.network_subnets = self._get_network_subnets()
        self._update_vlan_net_data(rule_data)
        self._update_baremetal_host_ip_data(rule_data)

    def _update_baremetal_host_ip_data(self, rule_data):
        """Update baremetal host ip's for applicable networks.

        The applicable networks are oob, oam, ksn, storage and overlay.
        These IPs are assigned based on network subnets ranges.
        If a particular ip exists it is overridden.
        """
        # Get default ip offset
        default_ip_offset = rule_data["default"]
        host_idx = 0
        LOG.info("Update baremetal host ip's")
        # TODO(ian-pittwood): this can be redone to be cleaner with models
        # host_idx runs across all racks, so every host in the site gets
        # a distinct offset within each network's subnet.
        for rack in self.data.baremetal:
            for host in rack.hosts:
                for net_type, net_ip in iter(host.ip):
                    ips = list(self.network_subnets[net_type])
                    host.ip.set_ip_by_role(
                        net_type, str(ips[host_idx + default_ip_offset]))
                host_idx += 1

    def _update_vlan_net_data(self, rule_data):
        """Offset allocation rules to determine ip address range(s)

        This rule is applied to incoming network data to determine
        network address, gateway ip and other address ranges.

        :param rule_data: dict of offsets keyed by 'default', 'oob',
            'gateway', 'ingress_vip', 'static_ip_end' and 'dhcp_ip_end'.
        """
        LOG.info("Apply network design rules")
        # Collect Rules
        default_ip_offset = rule_data["default"]
        oob_ip_offset = rule_data["oob"]
        gateway_ip_offset = rule_data["gateway"]
        ingress_vip_offset = rule_data["ingress_vip"]
        # static_ip_end_offset for non pxe network
        static_ip_end_offset = rule_data["static_ip_end"]
        # dhcp_ip_end_offset for pxe network
        dhcp_ip_end_offset = rule_data["dhcp_ip_end"]

        # Set ingress vip and CIDR for bgp
        LOG.info("Apply network design rules:bgp")
        ingress_data = self.data.network.get_vlan_data_by_name('ingress')
        subnet = IPNetwork(ingress_data.subnet[0])
        ips = list(subnet)
        self.data.network.bgp["ingress_vip"] = \
            str(ips[ingress_vip_offset])
        self.data.network.bgp["public_service_cidr"] = \
            ingress_data.subnet[0]
        LOG.debug(
            "Updated network bgp data:\n{}".format(
                pprint.pformat(self.data.network.bgp)))

        LOG.info("Apply network design rules:vlan")
        # Apply rules to vlan networks
        for net_type in self.network_subnets:
            vlan_network_data_ = \
                self.data.network.get_vlan_data_by_name(net_type)
            # oob reserves a larger block at the start of its subnet.
            if net_type == "oob":
                ip_offset = oob_ip_offset
            else:
                ip_offset = default_ip_offset
            subnet = self.network_subnets[net_type]
            ips = list(subnet)
            vlan_network_data_.gateway = str(ips[gateway_ip_offset])
            vlan_network_data_.reserved_start = str(ips[1])
            vlan_network_data_.reserved_end = str(ips[ip_offset])
            static_start = str(ips[ip_offset + 1])
            static_end = str(ips[static_ip_end_offset])
            if net_type == "pxe":
                # pxe splits its subnet in half: static then dhcp.
                mid = len(ips) // 2
                static_end = str(ips[mid - 1])
                dhcp_start = str(ips[mid])
                dhcp_end = str(ips[dhcp_ip_end_offset])
                vlan_network_data_.dhcp_start = dhcp_start
                vlan_network_data_.dhcp_end = dhcp_end
            vlan_network_data_.static_start = static_start
            vlan_network_data_.static_end = static_end
            # OAM have default routes. Only for cruiser. TBD
            if net_type == "oam":
                vlan_network_data_.routes = ["0.0.0.0/0"]  # nosec
            else:
                vlan_network_data_.routes = []
            LOG.debug(
                "Updated vlan network data:\n{}".format(
                    pprint.pformat(vlan_network_data_.dict_from_class())))

    def load_extracted_data_from_data_source(self, extracted_data):
        """Store data extracted by a plugin for further processing.

        Function called from cli.py to pass extracted data from input
        data source.
        """
        # TODO(ian-pittwood): Remove this and use init
        LOG.info("Loading plugin data source")
        self.data = extracted_data
        LOG.debug(
            "Extracted data from plugin:\n{}".format(
                pprint.pformat(extracted_data)))

    def dump_intermediary_file(self, intermediary_dir):
        """Write the intermediary yaml into ``intermediary_dir`` (or cwd)."""
        LOG.info("Writing intermediary yaml")
        intermediary_file = "{}_intermediary.yaml" \
            .format(self.region_name)
        # Place the file in the output dir when one was supplied.
        if intermediary_dir is not None:
            outfile = os.path.join(intermediary_dir, intermediary_file)
        else:
            outfile = intermediary_file
        LOG.info("Intermediary file:{}".format(outfile))
        yaml_file = yaml.dump(
            self.data.dict_from_class(), default_flow_style=False)
        # 'with' closes the file; the previous explicit close() was
        # redundant.
        with open(outfile, "w") as f:
            f.write(yaml_file)

    def generate_intermediary_yaml(self):
        """Apply design rules and return the processed intermediary data."""
        LOG.info("Start: Generate Intermediary")
        self._apply_design_rules()
        self._get_genesis_node_details()
        # This will validate the extracted data from different sources.
        # self._validate_intermediary_data(self.data)
        return self.data