@@ -103,10 +103,10 @@ The python scripts under stack-builder can be used to instantiate scenarios on a

 To create a basic 2-role cluster with a build, compute and control node outside of the Jenkins environment, some configuration must be set, either in data/hiera_data/user.yaml or by setting environment variables prefixed with "osi_user_" (hiera data) or "osi_conf_" (deployment config):

-export jenkins_internal_ip='%{ipaddress_eth1}'
-export jenkins_tunnel_ip='%{ipaddress_eth1}'
-export jenkins_initial_ntp=ntp.esl.cisco.com
-export jenkins_installer_repo=michaeltchapman
+export osi_user_internal_ip='%{ipaddress_eth1}'
+export osi_user_tunnel_ip='%{ipaddress_eth1}'
+export osi_conf_initial_ntp=ntp.esl.cisco.com
+export osi_conf_installer_repo=michaeltchapman

 The order of precedence is environment variables > user.yaml > jenkins.yaml > others. This hierarchy can be seen more clearly in manifests/setup.pp, which defines the hiera ordering.
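Note: the override behaviour can be checked directly with the metadata helper introduced later in this diff (a minimal sketch; ntp.example.com is a placeholder value):

    import os
    from metadata import build_metadata

    # Env vars with a matching prefix are applied after the yaml files
    # are merged, so they win over data/hiera_data/user.yaml.
    os.environ['osi_user_initial_ntp'] = 'ntp.example.com'
    meta = build_metadata('./data', '2_role', 'user')
    assert meta['initial_ntp'] == 'ntp.example.com'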
@@ -37,6 +37,7 @@ nodes:
       - 'stack-wait-build'
       - 'puppet-agent'
       - 'control-sig-compute'
+      - 'test-nova'
   compute-server02:
     memory: 2512
     ip_number: 21
@@ -1,7 +1,8 @@
 #!/usr/bin/env python
 from build import make, get
 from clean import kill
-from fragment import show
+from fragment import show as frag_show
+from metadata import show as meta_show

 from novaclient.v1_1 import client as nclient
 from quantumclient.quantum import client as qclient
@@ -38,13 +39,19 @@ def main():
     parser_kill.add_argument('-t', '--test_id', default=None, help='id of the test to kill')
     parser_kill.set_defaults(func=kill)


     parser_frag = subparsers.add_parser('frag', help='fragment help')
     parser_frag.add_argument('node', help='node to build a fragment for')
     parser_frag.add_argument('-f', '--fragment_dir', default='./stack-builder/fragments', help='directory containing deploy script fragments')
     parser_frag.add_argument('-y', '--yaml_dir', default='./data', help='directory containing scenario yaml')
     parser_frag.add_argument('-s', '--scenario', default='2_role', help='Scenario to use')
-    parser_frag.set_defaults(func=show)
+    parser_frag.set_defaults(func=frag_show)

+    parser_meta = subparsers.add_parser('meta', help='metadata help')
+    parser_meta.add_argument('node', help='node to build metadata for')
+    parser_meta.add_argument('-y', '--yaml_dir', default='./data', help='directory containing scenario yaml')
+    parser_meta.add_argument('-s', '--scenario', default='2_role', help='Scenario to use')
+    parser_meta.add_argument('-c', '--config', default='config', help='Type of config to build - user or config')
+    parser_meta.set_defaults(func=meta_show)

     args = parser.parse_args()
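With the meta subcommand wired up, either metadata set can now be dumped from the command line, for example sb meta build-server -s 2_role -c user, or sb frag build-server to render a node's deploy fragment (assuming the entry point is installed as sb, as the clean.py docstring below suggests).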
@@ -13,6 +13,7 @@ import os
 import uuid
 import quantumclient
 import fragment
 import yaml

+from metadata import build_metadata
 from debug import dprint
@@ -107,10 +108,10 @@ def make_subnet(q, ci_network_name, test_net, index=1, dhcp=True, gateway=False)
         test_subnet = net
     return test_subnet

-def boot_puppetised_instance(n, name, image, nic_list, key='m', os_flavor=u'm1.medium', deploy="", files=None, meta={}):
+def boot_puppetised_instance(n, name, image_name, nic_list, key='test2', os_flavor=u'm1.medium', deploy="", files=None, meta={}):
     images = n.images.list()
     for i, image in enumerate([image.name for image in images]):
-        if image == image:
+        if image == image_name:
             boot_image = images[i]

     flavors = n.flavors.list()
@@ -127,7 +128,7 @@ def boot_puppetised_instance(n, name, image, nic_list, key='m', os_flavor=u'm1.m
     dprint("Boot files: " + str(files))
     dprint("Boot meta: " + str(meta))

-    return n.servers.create(name, image=boot_image, flavor=boot_flavor, userdata=deploy, files=files, key_name=key, nics=nic_list, config_drive=True, meta=meta)
+    return n.servers.create(name, image=boot_image, flavor=boot_flavor, userdata=deploy, files=files, key_name=key, nics=nic_list, meta=meta)

 # Cisco internal network
 def get_external_network(q):
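Note that the boot no longer requests a config drive: user.yaml and config.yaml are pushed through the files argument (nova file injection) instead, which is why hiera_config.py below switches its metadata_path from the config-drive mount to /root/config.yaml.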
@@ -224,35 +225,46 @@ def make(n, q, args):
     # Not sure if we need this
     control_node_internal = net1_ports[0]['fixed_ips'][0]['ip_address']

-    config_meta = build_metadata(data_path)
-    dprint('Metadata Without hardcodes ' + str(config_meta))
-    # Put this into metadata and parse it on-node
-    # from config drive. There are limits on the count
-    # according to the doc but TODO confirm this
-    config_meta.update({'controller_public_address' : control_node_ip,
-                        'controller_internal_address' : control_node_ip,
-                        'controller_admin_address' : control_node_ip,
-                        'cobbler_node_ip' : build_node_ip,
-                        'ci_test_id' : test_id})
-
-    dprint('Metadata With hardcodes ' + str(config_meta))
+    # config is a dictionary updated from env vars and user supplied
+    # yaml files to serve as input to hiera
+    hiera_config_meta = build_metadata(data_path, scenario, 'user')
+
+    hiera_config_meta.update({'controller_public_address' : str(control_node_ip),
+                              'controller_internal_address' : str(control_node_ip),
+                              'controller_admin_address' : str(control_node_ip),
+                              'cobbler_node_ip' : str(build_node_ip),
+                              })
+
+    initial_config_meta = build_metadata(data_path, scenario, 'config')
+    initial_config_meta.update({'controller_public_address' : str(control_node_ip),
+                                'controller_internal_address' : str(control_node_ip),
+                                'controller_admin_address' : str(control_node_ip),
+                                'cobbler_node_ip' : str(build_node_ip),
+                                })

-    build_deploy = fragment.compose('build-server', data_path, fragment_path, scenario, config_meta)
-    control_deploy = fragment.compose('control-server', data_path, fragment_path, scenario, config_meta)
-    compute_deploy = fragment.compose('compute-server02', data_path, fragment_path, scenario, config_meta)
+    build_deploy = fragment.compose('build-server', data_path, fragment_path, scenario, initial_config_meta)
+    control_deploy = fragment.compose('control-server', data_path, fragment_path, scenario, initial_config_meta)
+    compute_deploy = fragment.compose('compute-server02', data_path, fragment_path, scenario, initial_config_meta)

     dprint('build_deploy: ' + str(build_deploy))
     dprint('control_deploy: ' + str(control_deploy))
     dprint('compute_deploy: ' + str(compute_deploy))

+    user_config_yaml = yaml.dump(hiera_config_meta, default_flow_style=False)
+    initial_config_yaml = yaml.dump(initial_config_meta, default_flow_style=False)
+
+    dprint('Config Yaml: \n' + str(initial_config_yaml))
+    dprint('User Yaml: \n' + str(user_config_yaml))

     build_node = boot_puppetised_instance(n,
                                           'build-server',
                                           image,
                                           build_nic_port_list([ci_ports[0]['id']]),
                                           deploy=build_deploy,
-                                          files={'/root/hiera_config.py': build_server_hiera_config()},
-                                          meta=config_meta
+                                          files={u'/root/hiera_config.py': build_server_hiera_config(),
+                                                 u'/root/user.yaml' : user_config_yaml,
+                                                 u'/root/config.yaml' : initial_config_yaml},
+                                          meta={'ci_test_id' : test_id}
                                           )

     # eth0, eth1 preallocated, eth2 dhcp
@@ -265,6 +277,7 @@ def make(n, q, args):
                                             image,
                                             control_nics,
                                             deploy=control_deploy,
+                                            #files={u'/root/meta_data.yaml' : config_yaml},
                                             meta={'ci_test_id' : test_id})

     compute_node = boot_puppetised_instance(n,
@@ -272,6 +285,7 @@ def make(n, q, args):
                                             image,
                                             build_nic_net_list([get_ci_network(q), test_net1]),
                                             deploy=compute_deploy,
+                                            #files={u'/root/meta_data.yaml' : config_yaml},
                                             meta={'ci_test_id' : test_id})

 def get(n, q, args):
@@ -1,10 +1,25 @@
-#!/bin/python
+#!/usr/bin/env python
+"""
+stack-builder.clean
+~~~~~~~~~~~~~~~~~~~
+
+This module is used by the kill subcommand
+of the sb binary to destroy any virtual
+resources created by an sb make command, with
+the exception of the ci network, subnet and router
+used as the deployment network and providing NAT.
+"""
 from novaclient.v1_1 import client as nclient
 from quantumclient.quantum import client as qclient
 from time import sleep
 import os

 def kill(n, q, args):
+    """
+    Destroy either all virtual test resources,
+    or the resources from a particular run.
+    """
     test_id = None
     if args.test_id:
         test_id = args.test_id
@@ -2,6 +2,8 @@ import os
 import string
 import yaml

+from metadata import build_metadata
+
 class PercentTemplate(string.Template):
     delimiter = '%'
@@ -45,4 +47,4 @@ def show(n, q, args):
     fragment_dir = args.fragment_dir
     scenario = args.scenario

-    print compose(hostname, yaml_dir, fragment_dir, scenario, {'cobbler_node_ip' : '192.168.1.100', 'controller_public_address' : '192.158.1.19'})
+    print compose(hostname, yaml_dir, fragment_dir, scenario, build_metadata('./data', '2_role', 'config'))
stack-builder/fragments/control-wait-compute (new file)
@@ -0,0 +1,38 @@
function ovs_node_count() {
    e=$(ovs-vsctl show | grep remote_ip | wc -l)
    return $e
}

ready=false
ovs_node_count
node_count=$?

# Wait until at least one node is available
until [ "$node_count" -gt 0 ]; do
    sleep 1
    ovs_node_count
    node_count=$?
done

until [ "$ready" = 'true' ]; do
    ovs_node_count
    initnodes=$?
    ready=true

    # Check status of each node that is connected to ovs
    for node in $(ovs-vsctl show | grep remote_ip | cut -d '"' -f 2); do
        if [ ! "$(curl http://"$node"/status | grep up)" ]; then
            ready=false
            echo "$node not ready for test to run"
        fi
    done
    sleep 5

    ovs_node_count
    newnodes=$?

    # If more nodes have come online then re-do the check
    if [ "$initnodes" -lt "$newnodes" ]; then
        ready=false
    fi
done
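A caveat with counting via the return status: shell exit statuses are a single byte, so ovs_node_count would wrap past 255 tunnels. Capturing the count directly, e.g. node_count=$(ovs-vsctl show | grep -c remote_ip), would avoid both the wrap and the helper function.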
stack-builder/fragments/copy-user-yaml (new file)
@@ -0,0 +1,2 @@
cp /root/user.yaml /etc/puppet/data/hiera_data
cp /root/config.yaml /etc/puppet/data
@@ -1,8 +1,7 @@
 # Fix fqdn so puppet apply doesn't give us one on the wrong domain
 sed -i 's/\#supersede/supersede/g' /etc/dhcp/dhclient.conf;
-sed -i 's/fugue.com home.vix.com/domain.name/g' /etc/dhcp/dhclient.conf;
+sed -i 's/fugue.com home.vix.com/%{domain}/g' /etc/dhcp/dhclient.conf;
 sed -i 's/domain-name,//g' /etc/dhcp/dhclient.conf
 dhclient -r eth0 && dhclient eth0;

 service ntp stop
 ntpdate %{initial_ntp}
@@ -1,3 +1,6 @@
 # Override hiera values that have been passed in
 # through metadata
 python /root/openstack-installer/stack-builder/hiera_config.py
+
+cp /root/config.yaml /etc/puppet/data
+cp /root/user.yaml /etc/puppet/data/hiera_data
@@ -1 +1 @@
-puppet agent -t --server build-server.domain.name
+puppet agent -t --server build-server.domain.name || [ $? -eq 2 ]
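(puppet agent -t enables detailed exit codes, under which 2 means the run succeeded while applying changes, so the added || [ $? -eq 2 ] keeps a successful converging run from being treated as a failure.)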
@@ -18,4 +18,4 @@ mkdir -p /mnt/config
 mount /dev/disk/by-label/config-2 /mnt/config

 # Facter fqdn will come from DNS unless we do this
-echo "127.0.1.1 $(hostname).domain.name $(hostname)" >> /etc/hosts
+echo "127.0.1.1 $(hostname).%{domain} $(hostname)" >> /etc/hosts
stack-builder/fragments/test-nova (new file)
@@ -0,0 +1,9 @@
# This gives the controller access to the floating network
# without entering the namespace
ifconfig br-ex 172.16.2.1 netmask 255.255.255.0 up

# Allow compute nodes to come up
sleep 20

# This is a basic test of nova, keystone and quantum functionality
/tmp/test_nova.sh
@@ -9,12 +9,11 @@
 the metadata
 """

-import json
 import yaml
 import os

 hiera_dir = '/etc/puppet/data'
-metadata_path = '/mnt/config/openstack/latest/meta_data.json'
+metadata_path = '/root/config.yaml'

 #debug
 #metadata_path = './sample.json'
@@ -23,7 +22,7 @@ metadata_path = '/mnt/config/openstack/latest/meta_data.json'
 def config_builder():
     # load metadata from config-drive
     with open(metadata_path, 'r') as metadata:
-        meta = json.loads(metadata.read())['meta']
+        meta = yaml.load(metadata.read())
     print meta

     # Set values specified in config_drive
@@ -38,10 +37,19 @@ def config_builder():
             y = yaml.load(hiera_file.read())
             for key, value in meta.items():
                 if (y != None and key in y):
-                    print "Setting : " + key + " with " + value
-                    y[key] = str(value)
+                    print "Setting : " + key + " with " + str(value)
+                    y[key] = value

             with open(path + '/' + yaml_file, 'w') as hiera_file:
                 hiera_file.write(yaml.dump(y, default_flow_style=False))

-config_builder()
+#config_builder()
+
+def facter_config():
+    with open(metadata_path, 'r') as metadata:
+        meta = yaml.load(metadata.read())
+        print meta
+        for key, value in meta.items():
+            os.environ[key] = str(value)
+
+facter_config()
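One caveat on facter_config: os.environ only affects this Python process and its children, so the exported values are visible to a later puppet run only if it is launched from this script; Facter also reads environment facts from FACTER_-prefixed variables, which may be worth checking here.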
stack-builder/metadata.py (new file)
@@ -0,0 +1,63 @@
#!/usr/bin/env python
"""
stack-builder.metadata
~~~~~~~~~~~~~~~~~~~~~~

This module will load in relevant environment variables
and config from the scenario yaml in order to create a
dictionary of metadata that will be used to build shell
scripts, populate hiera data for puppet, and drive the
creation of appropriate openstack resources for the
specified scenario. Environment variables will
override yaml data.
"""
import os
import yaml

def import_environ_keys(metadata, prefix):
    """
    Import any environment variables with the correct
    prefix into the metadata dictionary
    """
    for key, value in os.environ.items():
        if key[:9] == prefix:
            metadata[key[9:]] = value
    return metadata

def import_yaml(path, files):
    """
    Merge the keys of any yaml files from the given list
    that exist under path into a single dictionary
    """
    metadata = {}

    for filename in files:
        if os.path.exists(path + filename + '.yaml'):
            with open(path + filename + '.yaml', 'r') as f:
                y = yaml.load(f.read())
                if y:
                    for key, value in y.items():
                        metadata[key] = value
    return metadata

def build_metadata(path, scenario, config):
    """
    Create a metadata dictionary from yaml
    and environment variables
    """
    if config == 'config':
        prefix = 'osi_conf_'
        files = ['config']
        return import_environ_keys(import_yaml(path + '/', files), prefix)
    if config == 'user':
        prefix = 'osi_user_'
        files = ['user', 'jenkins', 'user.common', 'user.' + scenario]
        return import_environ_keys(import_yaml(path + '/hiera_data/', files), prefix)
    else:
        print "Invalid config type: choose from 'user' and 'config'"

def show(n, q, args):
    hostname = args.node
    yaml_dir = args.yaml_dir
    scenario = args.scenario
    config = args.config

    print yaml.dump(build_metadata(yaml_dir, scenario, config), default_flow_style=False)
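One small hardening worth considering (a sketch, not part of the diff): the slice key[:9] only works while both prefixes happen to be nine characters long, so a startswith check is safer if the prefixes ever change:

    def import_environ_keys(metadata, prefix):
        # Copy matching env vars into metadata, stripping the prefix
        for key, value in os.environ.items():
            if key.startswith(prefix):
                metadata[key[len(prefix):]] = value
        return metadata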