diff --git a/deployment/virtualbox/pybox/Parser.py b/deployment/virtualbox/pybox/Parser.py new file mode 100644 index 00000000..f9dd5fef --- /dev/null +++ b/deployment/virtualbox/pybox/Parser.py @@ -0,0 +1,398 @@ +#!/usr/bin/python3 +# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +Parser to handle command line arguments +""" + + +import argparse +import getpass + + +def handle_args(): + """ + Handle arguments supplied to the command line + """ + + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + + """ + ************************************** + * Setup type & install configuration * + ************************************** + """ + parser.add_argument("--setup-type", help= + """ + Type of setup: + AIO-SX + AIO-DX + STANDARD + STORAGE + """, + choices=['AIO-SX', 'AIO-DX', 'STANDARD', 'STORAGE'], + type=str) + parser.add_argument("--controllers", help= + """ + Number of controllers: + 1 - single controller + 2 - two controllers + """, + choices=[1, 2], + type=int, + default=2) + parser.add_argument("--workers", help= + """ + Number of workers: + 1 - single worker + 2 - two workers + etc. + """, + type=int, + default=2) + parser.add_argument("--storages", help= + """ + Number of storage nodes: + 1 - single storage node + 2 - two storage nodes + etc.\n + """, + type=int, + default=2) + parser.add_argument("--from-stage", help= + """ + Start stage. + For a list of stages run --list-stages + \n + """, + type=str) + parser.add_argument("--to-stage", help= + """ + End stage. + For a list of stages run --list-stages + \n + """, + type=str) + parser.add_argument("--custom-stages", help= + """ + Custom, comma separated list of stages. 
+ For a list of stages run --list-stages + \n + """, + type=str, + default=None) + + """ + ****************************************** + * Config folders and files configuration * + ****************************************** + """ + parser.add_argument("--iso-location", help= + """ + Location of ISO including the filename: + /folk/myousaf/bootimage.ISO + """, + type=str) + parser.add_argument("--config-files-dir", help= + """ + Directory with config files, scripts, images (i.e. + lab_setup.sh, lab_setup.conf, ...) that are needed + for the install. All files at this location are + transfered to controller-0 in /home/wrsroot. You + can add you own scripts that you need to be + present on the controller. Caution: rsync will + follow links and will fail if links are broken! + Use --config-files-dir-dont-follow-links + instead. Also, you can use both options for + different folders. + """, + type=str) + parser.add_argument("--config-files-dir-dont-follow-links", help= + """ + Same as --config-files-dir but keep symbolic link as is. + """, + type=str) + parser.add_argument("--config-controller-ini", help= + """ + Path to the local config_controller .ini. This + file is transfered to the controller. NOTE: OAM + configuration in this ini is updated dynamically + based on networking related args. + (e.g. stx_config.ini_centos, + ~/stx_config.ini_centos, /home/myousaf ...). + """, + type=str) + parser.add_argument("--vbox-home-dir", help= + """ + This is the folder where vbox disks will be + placed. e.g. 
/home or /folk/cgts/users + The disks will be in /home/wzhou/vbox_disks/ or + /folk/cgts/users/wzhou/vbox_disks/ + """, + type=str, default='/home') + parser.add_argument("--lab-setup-conf", help= + """ + Path to the config file to use + """, + action='append') + """ + ************************************** + * Disk number and size configuration * + ************************************** + """ + parser.add_argument("--controller-disks", help= + """ + Select the number of disks for a controller VM. default is 3 + """, + type=int, default=3, choices=[1, 2, 3, 4, 5, 6, 7]) + parser.add_argument("--storage-disks", help= + """ + Select the number of disks for storage VM. default is 3 + """, + type=int, default=3, choices=[1, 2, 3, 4, 5, 6, 7]) + parser.add_argument("--worker-disks", help= + """ + Select the number of disks for a worker VM. default is 2 + """, + type=int, default=2, choices=[1, 2, 3, 4, 5, 6, 7]) + parser.add_argument("--controller-disk-sizes", help= + """ + Configure size in MiB of controller disks as a comma separated list. + """, + type=str) + parser.add_argument("--storage-disk-sizes", help= + """ + Configure size in MiB of storage disks as a comma separated list. + """, + type=str) + parser.add_argument("--worker-disk-sizes", help= + """ + Configure size in MiB of worker disks as a comma separated list. + """, + type=str) + """ + ************** + * Networking * + ************** + """ + parser.add_argument("--vboxnet-name", help= + """ + Which host only network to use for setup. + """, + type=str) + parser.add_argument("--vboxnet-ip", help= + """ + The IP address of the host only adapter as it + is configured on the host (i.e. gateway). This is also used to + update GATEWAY_IP in [OAM_NETWORK] of config_controller config file. + """, + type=str) + parser.add_argument("--add-nat-interface", help= + """ + Add a new NAT interface to hosts. + """, + action='store_true') + parser.add_argument("--controller-floating-ip", help= + """ + OAM floating IP. 
+ """, + type=str) + parser.add_argument("--controller0-ip", help= + """ + OAM IP of controller-0. This is also used to + update IP_ADDRESS in [OAM_NETWORK] of + config_controller config file of an AIO SX setup. + This should not be the floating IP. + """, + type=str) + parser.add_argument("--controller1-ip", help= + """ + OAM IP of controller-1. + This should not be the floating IP. + """, + type=str) + parser.add_argument("--vboxnet-type", help= + """ + Type of vbox network, either hostonly on nat + """, + choices=['hostonly', 'nat'], + type=str, + default='hostonly') + parser.add_argument("--nat-controller-floating-local-ssh-port", help= + """ + When oam network is configured as 'nat' a port on + the vbox host is used for connecting to ssh on + floating controller. No default value is + configured. This is mandatory if --vboxnet-type is + 'nat' for non AIO-SX deployments. + """, + type=str) + parser.add_argument("--nat-controller0-local-ssh-port", help= + """ + When oam network is configured as 'nat' a port on + the vbox host is used for connecting to ssh on + controller-0. This is mandatory if --vboxnet-type + is 'nat'. No default value is configured. + """, + type=str) + parser.add_argument("--nat-controller1-local-ssh-port", help= + """ + When oam network is configured as 'nat' a port on + the vbox host is used for connecting to ssh on + controller-1. No default value is configued. This + is mandatory if --vboxnet-type is 'nat' for non + AIO-SX deployments or if second controller is + installed. + """, + type=str) + parser.add_argument("--ini-oam-cidr", help= + """ + The IP network and mask for the oam net, used to + update CIDR value in [OAM_NETWORK] of + config_controller config file. Default is + 10.10.10.0/24 + """, + type=str) + parser.add_argument("--ini-oam-ip-start-address", help= + """ + The start for the oam net allocation, used to + update IP_START_ADDRESS value in [OAM_NETWORK] of + config_controller config file. Not needed for AIO + SX setups. 
+ """, + type=str) + parser.add_argument("--ini-oam-ip-end-address", help= + """ + The end for the oam net allocation, used to update + IP_END_ADDRESS value in [OAM_NETWORK] of + config_controller config file. Not needed for AIO + SX setups. + """, + type=str) + """ + ****************** + * Custom scripts * + ****************** + """ + parser.add_argument("--script1", help= + """ + Name of an executable script file plus options. + Has to be present in --config-files-dir. + It will be transfered to host in rsync-config + stage and executed as part of custom-script1 + stage. + Example: --script1 'scripts/k8s_pv_cfg.sh,50,ssh,user' + Contains a comma separated value of: + ,,, Where: + script_name = name of the script, either .sh or .py; + timeout = how much to wait, in seconds, before considering failure; + serial/ssh = executed on the serial console; + user/root = as a user or as root (sudo || +# +# Note: The user MUST specify the journal location if multiple journal backing +# devices are configured in JOURNAL_DEVICES, otherwise is optional. +# +# Note: A storage tier is required for each OSD. 
The default +# "$CEPH_TIER_DEFAULT" should be used for all OSDs defined in a +# basic one tier configuration +# +# Examples: +# OSD_DEVICES="/dev/sdb|${CEPH_TIER_DEFAULT} /dev/sdc|${CEPH_TIER_DEFAULT}" - osd volumes with collocated journals +# OSD_DEVICES="/dev/sdb|${CEPH_TIER_DEFAULT}|10240 /dev/sdc|${CEPH_TIER_DEFAULT}|10240" - journals of 10GiB on a single journal device +# OSD_DEVICES="/dev/sdb|${CEPH_TIER_DEFAULT}|10240|/dev/sdg /dev/sdc|${CEPH_TIER_DEFAULT}|10240|/dev/sdi" - journal location is specified +#OSD_DEVICES="/dev/sdb|${CEPH_TIER_DEFAULT}" + +## File System storage size +DATABASE_FS_SIZE=0 +IMAGE_FS_SIZE=0 +BACKUP_FS_SIZE=0 +IMG_CONVERSIONS_FS_SIZE=0 + +FS_RESIZE_TIMEOUT=180 +FS_RESIZE_DEGRADE_TIMEOUT=120 + +# 60 minutes +DRBD_SYNC_TIMEOUT=3600 + +## Common resource names and prefixes +## +GROUPNO=0 + +TENANT1=tenant1 +TENANT2=tenant2 + +## The openstack user domain for creation of tenant user. +## The openstack project domain for creation of tenant projects. +## Since these are mapped to the SQL based Identity backend, they +## shall go into the Default domain. +OPENSTACK_USER_DOMAIN="default" +OPENSTACK_PROJECT_DOMAIN="default" + +FIRSTWORKERID="" +LASTWORKERID="" +WORKERIDS="" +ADMIN_USER="admin" +MEMBER_ROLE="member" + +## Numa node testing. Set to "node0", "node1", or "float" to set the numa +## node for a specific tenant. By default the tenant VMs are pinned to cpus +## but not on a specific numa node. +## +TENANT1NODE="" +TENANT2NODE="" + +## The maximum number of NUMA nodes of the system +## +NUMA_NODE_COUNT=2 + +## Time Synchronization service: NTP or PTP +## +TIMESERVICE="NTP" +NTPSERVERS="132.163.4.102,129.6.15.30,198.111.152.100" + +## Tenant network configurations. 
+## +DNSNAMESERVERS="147.11.57.133 128.224.144.130 147.11.57.128" +MGMTIPVERSION=4 +MGMTNETS=1 +MGMTSUBNETS=("192.168.101.0/27,192.168.101.32/27,192.168.101.64/27", "192.168.201.0/27,192.168.201.32/27,192.168.201.64/27") +MGMTDVR=("no" "no") +MGMTIPV6ADDRMODE="dhcpv6-stateful" +EXTIPVERSION=4 +EXTERNALGWIP="192.168.1.1" +EXTERNALCIDR="192.168.1.0/24" +EXTERNALSNAT="no" + + +## Provider network configurations. Each lab has dedicated VLANs assigned to it. +## An exception to this is that the external network VLAN is shared across all +## of the labs. The configuration syntax which is describes belows allows to +## define an arbitrary number of provider networks on a per lab basis and then +## to refer to those provider networks by name when defining data interfaces. +## Since in the large lab configuration we need to distinguish provider +## networks from each group/island we prefix their name with "groupX-" where X +## is the group number. To avoid having to specify the group number in several +## places we leave it off of all variable specifications and the prefix is +## added dynamically where needed. For example, "ext0" below will become +## "group0-ext0" when actually created; and "data0" will become "group0-data0" +## when created. +## +## Example: +## +## PROVIDERNETS="vxlan|ext0|1500|4-4|shared|239.0.2.1|4789|11 \ +## vlan|data0|1500|600-615|tenant1 \ +## vlan|data0b|1500|700-731|shared \ +## vlan|data1|1500|616-631|tenant2" +## +PROVIDERNETS="vlan|ext0|1500|10-10|shared \ +vlan|data0|1500|600-615|tenant1 \ +vlan|data0b|1500|700-731|shared \ +vlan|data1|1500|616-631|tenant2" + +## Enable vlan transparency on all provider networks that have not provided a +## setting for this feature. For now this is a global attribute. In the +## future we will add control on a per provider network and tenant network +## basis. +## +VLAN_TRANSPARENT="False" + + +## Enable vlan transparency on internal networks. 
This is an alternative to +## using trunks, and if True, then trunks will not be provisioned. +VLAN_TRANSPARENT_INTERNAL_NETWORKS="True" + +## Tenant network provider network manual configurations. Because our lab +## environment dictates that we use specific VLAN values for specific tenants +## (i.e., to reach NAT box, or to be carried by specific data interface) we +## need to manual set the provider network and segmentation id values. +## +EXTERNALPNET="vlan|ext0|10" +INTERNALPNET="vlan|data0b" + + +## Interface configurations. The script supports setting up mgmt, infra, oam, +## data, pci-passthrough and sriov interfaces. The syntax for all types is +## similar but each type can refine how parameters are specified. There are +## some attribute that are common to each type. Each worker node can have a +## custom value for each variable by prepending the node name; otherwise the +## default (unnamed) variable is used. These are some examples: +## +## NOTE: the provider network names are supplied without the "group0-" prefix. +## It is added as needed when the data is consumed to avoid having to specify +## the groupno in multiple places. +## +## NOTE: the PCI addresses are a composite of the actual PCIADDR and PCIDEV +## values separated with a "+" character. PCIDEV is assumed to be "0" if not +## specified. This value is only relevant for Mellanox devices. +## +## NOTE: the device field can be specified as a comma separated list of +## PCIADDR+PCIDEV values or a comma separated list of interface names. +## Interface names are autogenerated based on type and index in the *_INTERFACE +## list variable (i.e., data0, infra1, etc...). For VLAN interfaces the VLANID +## is used instead of the index (e.g., vlan11). 
The devices are created in +## this order so you can only refer to names that have already been created by +## the time the dependent device gets created: mgmt data infra pthru sriov +## +## Some examples: +## +## DATA_INTERFACES="ethernet|0000:00:09.0+0|1500|data0 ethernet|0000:00:0a.0+0|1500|data1" +## DATA_INTERFACES="ethernet|0000:00:09.0+0|1500|data0 ethernet|0000:00:0a.0+0|1500|data1 vlan|data0|1500|ext0|11" +## DATA_INTERFACES="ae|0000:00:09.0+0,0000:00:0a.0+0|1500|data0,data1" +## WORKER2_DATA_INTERFACES="ae|0000:00:06.0+0,0000:00:07.0+0|1500|data0,data1" +## INFRA_INTERFACES="vlan|data0|1500|none|12" +## PTHRU_INTERFACES="ethernet|0000:84:00.0|1500|data0" +## SRIOV_INTERFACES="ethernet|0000:84:00.0|1500|data0|4" +## +DATA_INTERFACES="ethernet|0000:00:09.0+0|1500|ext0,data0,data0b ethernet|0000:00:0a.0+0|1500|data1" +OAM_INTERFACES="ethernet|${DEFAULT_IF0}|1500|none" + +## IP addresses and routes for data interfaces if using VXLAN provider +## networks. Must be overridden in lab specific config file since each worker +## node must have its own IP addresses. +## +## IPADDRS must be in the form: "IP1/PREFIX,IP2/PREFIX,..." +## IPROUTES must be in the form: "NET1/PREFIX/GWY,NET2/PREFIX/GWY,..." +## +## There must be an entry for each interface that uses VXLAN provider networks +## on each node. The IPADDRS and IPROUTES variables must be double prefixes; +## once for node name and once for interface name. Example: +## +## WORKER0_DATA0_IPADDRS="192.168.57.2/24,fd00:0:0:1::2/64" +## WORKER0_DATA0_IPROUTES="0.0.0.0/0/192.168.57.1,::/0/fd00:0:0:1::1" +## +## Alternatively, IP addresses on a per worker node can be omitted if an IP +## address pool is defined. The pools must be constrained to the IP addresses +## that are assigned to the lab being configured. 
+## +## DATA0_IPPOOLS="data0v4|192.168.57.0|24|random|192.168.57.2-192.168.57.5 \ +## data0v6|fd00:0:0:1::|64|random|fd00:0:0:0:1::2-fd00:0:0:1::5" +## DATA1_IPPOOLS="data1v4|192.168.58.0|24|random|192.168.58.2-192.168.58.5" +## data1v6|fd00:0:0:2::|64|random|fd00:0:0:0:2::2-fd00:0:0:2::5" +## + +## VXLAN specific attributes. The GROUP and PORT array can have multiple +## values. The 0th entry will be used for the tenant1 provider network +## ranges, the Nth entry will be used for the tenant2 provider networ ranges, +## and the internal network ranges will alternate between each value therefore +## allowing for a mixture of addresses and ports in the configuration. Each +## internal range will have up to VXLAN_INTERNAL_STEP entries +## +VXLAN_GROUPS="239.0.1.1 ff0e::1:1 239.0.1.2 ff0e::1:2" +VXLAN_PORTS="8472 4789" +VXLAN_TTL=1 +VXLAN_INTERNAL_STEP=4 + +## Board Management attributes. Configures the BMC (e.g., iLO3, iLO4, etc..) +## for boards that support it. +## +## The BM_USERNAME, BM_PASSWORD, and BM_TYPE variables can be overriden on +## a per-lab basis or a per-worker basis in the lab configuration file. +## +## The BM_MACADDR variable must be overridden on a per-worker basis in the +## lab configuration file. +## +BM_ENABLED="no" +BM_TYPE="ilo4" +BM_USERNAME="Administrator" +BM_PASSWORD="remoteADMIN" +BM_MACADDR="" + +## Set the default VIF model of the first NIC on all VM instances to "virtio". +## Do not change this as it may cause unintended side-effects and NIC +## reordering in the VM. +MGMT_VIF_MODEL="virtio" + +## Common paths +IMAGE_DIR=${HOME}/images +USERDATA_DIR=${HOME}/userdata + +## Networking test mode +## choices={"layer2", "layer3"} +## +NETWORKING_TYPE="layer3" + +## Provider Network type +## choices={"vlan", "vxlan", "mixed"} +## +## warning: If setting to "vxlan" there must be a DATA0IPADDR and DATA1IPADDR +## set for each worker node. +PROVIDERNET_TYPE="vlan" + +## Special test mode for benchmarking only. 
Set to "yes" only if only a +## single VM pair will exist at any given time. This will cause only a single +## pair of tenant and internal networks to be created. This is to facilitate +## sequentially booting and taring down VMs of different types to benchmark +## their network performance without having to cleanup the lab and install a +## different configuration. +## +REUSE_NETWORKS="no" + +## Special test mode for benchmarking only. Set to "yes" to force tenant +## networks to be shared so that a single VM can have a link to each tenant's +## tenant network. This is to force traffic from ixia to be returned directly +## to ixia without first passing through an internal network and another VM. +## In this mode it is expected that only a single tenant's VM will be launched +## at any given time (i.e., it will not work if 2 VMs are both sharing each +## others tenant network). +## +## resulting in: +## +## ixia-port0 +---+ tenant1-net0 +------+ +## VM (tenant1) +## ixia-port1 +---+ tenant2-net0 +------+ +## +## instead of: +## +## ixia-port0 +---+ tenant1-net0 +------+ VM (tenant1) ---+ +## | +## internal-net0 +## | +## ixia-port1 +---+ tenant2-net0 +------+ VM (tenant2) ---+ +## +SHARED_TENANT_NETWORKS="no" + +## Special test mode for benchmarking only. Set to "yes" to create a separate +## tenant network and router which will sit between Ixia and the regular layer2 +## tenant networks. The purpose is to insert an AVR router in to the Ixia +## traffic path so that its performance can be evaluated by vbenchmark. This +## requires that SHARED_TENANT_NETWORKS be set to "yes" because the end goal is +## to have a single VM bridge traffic between 2 tenant networks. 
+## +## resulting in: +## +## ixia-port0 +---+ tenant1-ixia-net0 +---+ router +---+ tenant1-net0 +------+ +## VM (tenant1) +## ixia-port1 +---+ tenant2-ixia-net0 +---+ router +---+ tenant2-net0 +------+ +## +## +## instead of: +## +## ixia-port0 +---+ tenant1-net0 +------+ +## VM (tenant1) +## ixia-port1 +---+ tenant2-net0 +------+ +## +ROUTED_TENANT_NETWORKS="no" + +## Enables/Disables the allocation of floating IP addresses for each VM. This +## functionality is disabled by default because our NAT box is configured with +## static routes that provide connectivity directly to the tenant networks. +## The NAT box, as its name implies, already does NAT so we do not need this +## for day-to-day lab usage. It should be enabled when explicitly testing +## this functionality. +FLOATING_IP="no" + +## DHCP on secondary networks +## +INTERNALNET_DHCP="yes" +TENANTNET_DHCP="yes" + +## to only create the first NIC on each VM. +EXTRA_NICS="yes" + +## Enable config-drive instead of relying only on the metadata server +## +CONFIG_DRIVE="no" + +## Force DHCP servers to service metadata requests even if there are routers on +## the network that are capable of this functionality. Useful for SDN testing +## because routers exist but are implemented in openflow rules and are not +## capable of servicing metadata requests. +## +FORCE_METADATA="no" + +## Root disk volume type and image size in GB +## choices={"glance", "cinder"} +## +IMAGE_TYPE="cinder" + +## Number of vswitch/shared physical CPU on worker nodes on first numa node +## The PCPU assignment can also be specified as a mapping between numa node and count +## with the following format. +## XXXX_PCPU_MAP=":,:" +VSWITCH_PCPU=2 +SHARED_PCPU=0 + +## Setup custom VCPU model for each VM type if necessary +## +DPDK_VCPUMODEL="SandyBridge" + +## Add extra functions to guest userdata +## +#EXTRA_FUNCTIONS="pgbench" +EXTRA_FUNCTIONS="" + +## Logical interface default configuration values. 
These can be specified +## directly when defining *_INTERFACES variables, or with a ${IFTYPE}_* +## refinement, or left to use the DEFAULT_* value. +## +DEFAULT_AEMODE="balanced" +DEFAULT_AEHASH="layer2" +MGMT_AEMODE="802.3ad" +MGMT_AEHASH="layer3+4" + + +## Test creating and apply profiles on nodes +## +TEST_PROFILES="yes" + +## Maximum number of networks physically possible in this lab +## +MAXNETWORKS=1 + +## If the Ixia port has less MAX throughput than the NIC under test, we +## use more than 1 Ixia port to achieve the desired line rate. This variable +## specifies port pairs to acheive the desired line rate. +IXIA_PORT_PAIRS=1 + +## Maximum number of VLANs per internal network +## +MAXVLANS=1 +FIRSTVLANID=0 + +##If Compute nodes a virtual and controller is hardware [yes/no] +VIRTUALNODES="no" + +## Controls the number of VM instances that share the tenant data networks that +## go to ixia. This exists to allow more VM instances in labs without having +## to increase the number of VLAN instances allocated to the lab. This only +## works for NETWORKING_TYPE=layer3. +## +VMS_PER_NETWORK=1 + +## Enable/disable VIRTIO multi-queue support +VIRTIO_MULTIQUEUE="false" + +## Custom Flavors to create +## type or a string with parameters, e.g. +## FLAVORS="|id=1000,cores=1|mem=512|disk=2|dedicated|heartbeat|numa_node.0=0|numa_node.1=0|numa_nodes=2|vcpumodel=SandyBridge|sharedcpus" +## - is required +## - id - number +## - cores 1-N +## - mem in MB, +## - disk in GB (volume size), use +## - dedicated - dedicated if in list used, otherwise not +## - heartbeat - heartbeat if in list, otherwise not +## - numa_node.0 - Pin guest numa node 0 to a physical numa node. Default not pinned +## - numa_node.1 - Pin guest numa node 1 to a physical numa node. 
Default not pinned +## - numa_nodes - Expose guest to specified number of numa cores (spread across zones) +## - storage - Nova storage host: local_lvm (default), local_image, remote +## - vcpumodel - default Sandy Bridge +## - sharedcpus - default disabled +## If multiple flavors, append them with space between: +## FLAVORS="flavor1|cores=1|mem=512|disk=2 flavor2|cores=2|mem=1024,disk=4" +## Default values +FLAVORS="" + +## Number of VMs to create. This can be simply a number for each interface +## type or a string with parameters, e.g. +## AVPAVPAPPS="2|flavor=small|disk=2|image=tis-centos-guest|dbench|glance|voldelay|volwait" +## - Interface type is defined by the name (AVPAPPS, VIRTIOAPPS, DPDKAPPS, SRIOVAPPS, or PCIPTAPPS) +## - Only the first parameter is (# of VM pairs) required. Absence means use default +## - First number is number of pairs to create +## - flavor - base flavor name to use for defaults, default is default flavor for the app type +## - disk in GB (volume size), use +## - image is base image file name - Image type raw and filename extension .img is assumed +## - imageqcow2 is base qcow2 image file name. Image type qcow2 and filename extension qcow2 is assumed. +## - cache_raw - do background caching of qcow2 image to raw +## - glance - use glance (default is IMAGE_TYPE) Note: use voldelay with this to prevent volume creation +## - dbench - enable dbench in VM, default is disabled +## - nopoll - Don't poll in boot line for VM. 
Allows faster booting of VMs +## - voldelay - Delay creating volume until VM boot if present, otherwise create volume initially (N/A if using glance) +## - volwait - If using voldelay, will wait for volume creation in volume launch, otherwise no wait (N/A if using glance) +## AVPAPPS=2 would remain legal (all defaults) +## If multiple AVP flavors of same type, append them with space between: +## AVPAPPS="2|flavor=small|disk=2 4|flavor=large|disk=4" +## Default values +DPDKAPPS=0 +AVPAPPS=0 +VIRTIOAPPS=0 +SRIOVAPPS=0 +PCIPTAPPS=0 +VHOSTAPPS=0 + +## Default flavors (if you change any of these make sure that +## setup_minimal_flavors creates the required flavors otherwise set +## FLAVOR_TYPES="all" +## +DPDKFLAVOR="medium.dpdk" +AVPFLAVOR="small" +VIRTIOFLAVOR="small" +SRIOVFLAVOR="small" +PCIPTFLAVOR="small" +VHOSTFLAVOR="medium.dpdk" + +## Set to "all" if you want additional flavors created; otherwise only the +## ones in DPDKFLAVOR, AVPFLAVOR, VIRTIOFLAVOR, SRIOVFLAVOR, and PCIPTFLAVOR will be created +FLAVOR_TYPES="minimal" + +## Network QoS values +EXTERNALQOS="external-qos" +INTERNALQOS="internal-qos" +EXTERNALQOSWEIGHT=16 +INTERNALQOSWEIGHT=4 +MGMTQOSWEIGHT=8 + +## Nova Local Storage Settings +## +## LOCAL_STORAGE="setting" +## Default settings used to control local storage provisioning +## +## Format of default_setting or host_setting is: +## mode|pvs|lv_calc_mode|lv_fixed size +## +## Parameters: +## mode - Determines the instance disk storage source/format of the +## node: +## +## local_lvm - nova-local volume group is created and physical volumes are +## added. The instances logical volume is created and +## mounted at /etc/nova/instances. The instances logical +## volume uses a subset of the available space within the +## volume group. Instance disks are logical volumes created +## out of the available space in the nova-local volume group +## local_image - nova-local volume group is created and physical volumes +## are added. 
The instances logical volume is created and +## mounted at /etc/nova/instances. The instances logical is +## sized to use 100% of the volume group. Instance disks are +## file based CoW images contained within +## /etc/nova/instances. +## remote - nova-local volume group is created and physical volumes are +## added. The instances logical volume is created and +## mounted at /etc/nova/instances. The instances logical is +## sized to use 100% of the volume group. Instance disks are +## RBD based from the ceph ephemeral pool. +## +## +## pvs - Disk or partition device node names to add to the nova local +## volume group. +## +## - comma separated device list: /dev/sdc,/dev/sdd +## +## +## lv_calc_mode - Mode for calculating the size of the instance logical +## volume +## +## fixed - size provided by lv_fixed_size +## +## +## lv_fixed_size - size in gib (i.e 10GB). Only relevant for +## local_lvm mode. Ignored for remote or local_image modes +## +## All 5 parameters must be specified. +## +## Examples: +## nova-local uses disk partition, instances backed by remote storage +## LOCAL_STORAGE="remote|/dev/disk/by-path/pci-0000:00:0d.0-ata-1.0-part6|fixed|0" +## +## nova-local on /dev/disk/by-path/pci-0000:00:0d.0-ata-2.0 instances backed by remote storage +## LOCAL_STORAGE="remote|/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0|fixed|0" +## +## nova-local uses disk partition, instances backed by local CoW image files +## LOCAL_STORAGE="local_image|/dev/disk/by-path/pci-0000:00:0d.0-ata-1.0-part6|fixed|0" +## +## nova-local uses disk partition, instances backed by local LVM disks +## LOCAL_STORAGE="local_lvm|/dev/disk/by-path/pci-0000:00:0d.0-ata-1.0-part6|fixed|2" +## +## nova-local uses multiple disks, instances backed by local CoW image files +## LOCAL_STORAGE="local_image|/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0,/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0|fixed|0" +## +## nova-local uses disk and partition, instances backed by local LVM disks +## 
LOCAL_STORAGE="local_lvm|/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0,/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0-part4|fixed|2" +## +LOCAL_STORAGE="local_image|fixed|10" +#CONTROLLER0_LOCAL_STORAGE="local_lvm|/dev/sdc|fixed|10" + +## Neutron MAC address overrides +NEUTRON_BASE_MAC="" +NEUTRON_DVR_BASE_MAC="" +NEUTRON_EXTENSION_DRIVERS="" +NEUTRON_PORT_SECURITY="" + +## BGP Default Configuration +BGP_ENABLED="no" +BGP_SERVICE_PLUGINS= + +## Software Defined Networking (SDN) Default Configuration +SDN_ENABLED=${sdn_enabled:-no} +SDN_NETWORKING="opendaylight" + +## SDN OpenDaylight Specific Configuration +SDN_ODL_MECHANISM_DRIVERS="vswitch,opendaylight_v2,sriovnicswitch,l2population" +SDN_ODL_SERVICE_PLUGINS="odl-router_v2" +SDN_ODL_USERNAME=admin +SDN_ODL_PASSWORD=admin +SDN_ODL_PORT_BINDING_CONTROLLER=legacy-port-binding + +## SFC Specific Configurations +## (note: these are on by default in the system configuration but exist here so +## that they can be modified when enabling SDN with SFC) +SFC_ENABLED="no" +SFC_SERVICE_PLUGINS="flow_classifier,sfc" +SFC_SFC_DRIVERS="avs" +SFC_FLOW_CLASSIFIER_DRIVERS="avs" + + +## Timeout to wait for system service-parameter-apply to complete +SERVICE_APPLY_TIMEOUT=30 + +## PCI vendor/device IDs +PCI_VENDOR_VIRTIO="0x1af4" +PCI_DEVICE_VIRTIO="0x1000" +PCI_DEVICE_MEMORY="0x1110" +PCI_SUBDEVICE_NET="0x0001" +PCI_SUBDEVICE_AVP="0x1104" + + +if [ ! -f ${CONFIG_FILE} ]; then + ## If the main config file does not exist then check to see if the group0 + ## file exists + if [ -f ${GROUP0_FILE} ]; then + CONFIG_FILE=${GROUP0_FILE} + fi +fi + +if [ ! -f ${CONFIG_FILE} ]; then + ## User must provide lab specific details + echo "Missing config file: ${CONFIG_FILE} or ${GROUP0_FILE}" + exit 1 +fi + +trim() +{ + local trimmed="$1" + + # Strip leading space. + trimmed="${trimmed## }" + # Strip trailing space. 
+ trimmed="${trimmed%% }" + + echo "$trimmed" +} + +# Reset chain by clearing the chain config file +if [ "${CLEAR_CHAIN}" == "yes" ]; then + rm -f ${CONFIG_CHAIN_FILE} +fi + +# Chain multiple config files and source them. +# Each time we call the script with a different config file +# that file is added to the chain and will be sourced in the +# same order as they were added. +# Subsequent call to the script don't need to include the +# config file as a parameter as it's already in the chain +if [ ! -f ${CONFIG_CHAIN_FILE} ]; then + touch ${CONFIG_CHAIN_FILE} + echo $CONFIG_FILE > ${CONFIG_CHAIN_FILE} +else + readarray CHAIN_CONF < ${CONFIG_CHAIN_FILE} + ADDED=0 + for cfg_file in "${CHAIN_CONF[@]}"; do + trim_conf="$(trim $cfg_file)" + if [ "${trim_conf}" == "${CONFIG_FILE}" ] ; then + ADDED=1 + break + fi + done + + if [ $ADDED -eq 0 ]; then + echo $CONFIG_FILE >> $CONFIG_CHAIN_FILE + fi +fi + +## *** WARNING *** +## +## Source the per-lab configuration settings. Do not place any user +## overrideable variables below this line. +## +## Support for chaining config files: +## The script now supports adding additional settings from extra +## config files and chaining such configurations. +## Example: +## ./lab_setup.sh +## ./lab_setup.sh -f add_lvm.conf +## ./lab_setup.sh -f add_ceph.conf +## ./lab_setup.sh +## +## Behavior: +## - first call will add the default lab_setup.conf file to the chain +## and source it +## - the second call will add add_lvm.conf to the chain and source +## lab_setup.conf and add_lvm.conf (in this order) +## - the second call will add add_ceph.conf to the chain and source +## lab_setup.conf, add_lvm.conf and add_ceph.conf (in this order) +## - the last call will not add anything new to the chain, but will +## source all other config files anyway in the chain. 
+## +## Purpose of this change is to allow adding features or config +## options to a setup, no by changing the existing config file, +## but by adding a new config file with just the new features and +## run lab_setup again. +## Thus we can have a setup without any cinder backend, run tests +## on it, then easily add LVM and/or ceph on it and run the same +## (or other tests). +## +## *** WARNING *** + +readarray CHAIN_CONF < ${CONFIG_CHAIN_FILE} + +for cfg_file in "${CHAIN_CONF[@]}"; do + trim_conf="$(trim $cfg_file)" + echo "Sourcing ${trim_conf} from the config chain" + set +u + source ${cfg_file} + set -u +done + +rm -f ${STATUS_FILE} + +## Set a per-group status file +GROUP_STATUS=${STATUS_FILE}.group${GROUPNO} +LOG_FILE=${LOG_FILE:-"${HOME}/lab_setup.group${GROUPNO}.log"} + +## Overwrite storage backend user settings based on Cinder & Glance configured backends +## +if [[ ${CINDER_BACKENDS} =~ "lvm" ]]; then + CONFIGURE_STORAGE_LVM="yes" +fi + +if [[ ${CINDER_BACKENDS} =~ "ceph" ]]; then + CONFIGURE_STORAGE_CEPH="yes" +fi + +if [[ ${GLANCE_BACKENDS} =~ "ceph" ]]; then + CONFIGURE_STORAGE_CEPH="yes" +fi + + +## Variables which can be affected by user overrides +## +TENANTNODES=("${TENANT1NODE}" "${TENANT2NODE}") +TENANTS=("${TENANT1}" "${TENANT2}") +PROVIDERNET0="group${GROUPNO}-data0" +PROVIDERNET1="group${GROUPNO}-data1" +EXTERNALNET="external-net${GROUPNO}" +EXTERNALSUBNET="external-subnet${GROUPNO}" +INTERNALNET="internal${GROUPNO}-net" +INTERNALSUBNET="internal${GROUPNO}-subnet" + +DEFAULT_BOOTDIR=${HOME}/instances +BOOTDIR=${HOME}/instances_group${GROUPNO} +BOOTCMDS=${BOOTDIR}/launch_instances.sh +HEATSCRIPT=${BOOTDIR}/heat_instances.sh + +## All combined application types +ALLAPPS="${DPDKAPPS} ${AVPAPPS} ${VIRTIOAPPS} ${SRIOVAPPS} ${PCIPTAPPS} ${VHOSTAPPS}" + +## Total counts after user overrides +## +APPCOUNT=0 +APPS=($ALLAPPS) +for INDEX in ${!APPS[@]}; do + ENTRY=${APPS[${INDEX}]} + DATA=(${ENTRY//|/ }) + NUMVMS=${DATA[0]} + APPCOUNT=$((${APPCOUNT} 
+ ${NUMVMS})) +done + +NETCOUNT=$((${APPCOUNT} / ${VMS_PER_NETWORK})) +REMAINDER=$((${APPCOUNT} % ${VMS_PER_NETWORK})) +if [ ${REMAINDER} -ne 0 ]; then + NETCOUNT=$((NETCOUNT+1)) +fi + +if [ ${NETCOUNT} -gt ${MAXNETWORKS} ]; then + echo "Insufficient number of networks for all requested apps" + exit 1 +fi + +if [ ${VMS_PER_NETWORK} -ne 1 -a ${NETWORKING_TYPE} != "layer3" ]; then + echo "VMS_PER_NETWORK must be 1 if NETWORKING_TYPE is not \"layer3\"" + exit 1 +fi + +## Tenant Quotas +## network: each tenant has a network per VM plus a mgmt network +## subnet: each tenant has a subnet per network plus additional mgmt subnets +## port: each tenant has 3 ports per VM, 2 ports per network (DHCP/L3), +## 1 floating-ip per VM, 1 gateway per router, plus additional manual ports +## volume/snapshot - 2x instances to allow volume for launch script volume and heat volume +## +NETWORK_QUOTA=${NETWORK_QUOTA:-$((NETCOUNT + (2 * ${MGMTNETS})))} +SUBNET_QUOTA=${SUBNET_QUOTA:-$((NETWORK_QUOTA + 10))} +PORT_QUOTA=${PORT_QUOTA:-$(((APPCOUNT * 3) + (${NETCOUNT} * 2) + ${APPCOUNT} + 1 + 32))} +INSTANCE_QUOTA=${INSTANCE_QUOTA:-${APPCOUNT}} +CORE_QUOTA=${CORE_QUOTA:-$((INSTANCE_QUOTA * 3))} +FLOATING_IP_QUOTA=${FLOATING_IP_QUOTA:-${APPCOUNT}} +VOLUME_QUOTA=${VOLUME_QUOTA:-$((APPCOUNT * 2))} +SNAPSHOT_QUOTA=${SNAPSHOT_QUOTA:-$((APPCOUNT * 2))} + +## Admin Quotas +## network: the admin has 1 external network, and all shared internal networks +## subnet: the admin has 1 for the external network, and 1 subnet per pair of VM +## port: the admin has 1 port on the external network for DHCP (if needed) +## +SHARED_NETCOUNT=${NETCOUNT} +ADMIN_NETWORK_QUOTA=${ADMIN_NETWORK_QUOTA:-$((SHARED_NETCOUNT + 2))} +ADMIN_SUBNET_QUOTA=${ADMIN_SUBNET_QUOTA:-$((NETCOUNT + 2))} +ADMIN_PORT_QUOTA=${ADMIN_PORT_QUOTA:-"10"} + +## Two VMs per application pairs +VMCOUNT=$((APPCOUNT * 2)) + +## Prune the list of controller nodes down to the ones that are locked-online +TMP_CONTROLLER_NODES=${CONTROLLER_NODES} 
+CONTROLLER_NODES="" +if [ ${GROUPNO} -eq 0 ]; then + for NODE in ${TMP_CONTROLLER_NODES}; do + HOSTNAME=$(system host-list ${CLI_NOWRAP} | grep ${NODE} | awk '{if (($8 == "locked") && ($12 == "online")) {print $4;}}') + if [ -n "${HOSTNAME}" ]; then + CONTROLLER_NODES="${HOSTNAME} ${CONTROLLER_NODES}" + fi + done +fi + +AVAIL_CONTROLLER_NODES="" +if [ ${GROUPNO} -eq 0 ]; then + for NODE in ${TMP_CONTROLLER_NODES}; do + HOSTNAME=$(system host-list ${CLI_NOWRAP} | grep ${NODE} | awk '{if (($8 == "unlocked") && ($12 == "available")) {print $4;}}') + if [ -n "${HOSTNAME}" ]; then + AVAIL_CONTROLLER_NODES="${HOSTNAME} ${AVAIL_CONTROLLER_NODES}" + fi + done +fi + +## Prune the list of storage nodes down to the ones that are locked-online +TMP_STORAGE_NODES="${STORAGE_NODES} ${EXTRA_STORAGE_NODES}" +STORAGE_NODES="" +if [ ${GROUPNO} -eq 0 -o -n "${EXTRA_STORAGE_NODES}" ]; then + for NODE in ${TMP_STORAGE_NODES}; do + HOSTNAME=$(system host-list ${CLI_NOWRAP} | grep ${NODE} | awk '{if (($8 == "locked") && ($12 == "online")) {print $4;}}') + if [ -n "${HOSTNAME}" ]; then + STORAGE_NODES="${HOSTNAME} ${STORAGE_NODES}" + fi + done +fi + +## Global data to help facilitate constants that vary by VM type. These must +## all map one-to-one with the values in APPTYPES +## +APPTYPES=("PCIPT" "SRIOV" "DPDK" "AVP" "VIRTIO" "VHOST") +VMTYPES=("pcipt" "sriov" "vswitch" "avp" "virtio" "vhost") +NETTYPES=("kernel" "kernel" "vswitch" "kernel" "kernel" "vswitch") +NIC1_VIF_MODELS=("avp" "avp" "avp" "avp" "virtio" "virtio") +NIC2_VIF_MODELS=("pci-passthrough" "pci-sriov" "avp" "avp" "virtio" "virtio") + + +function get_node_list { + ## Build the list of required node numbers using both the old and new way + ## of specifying the worker id list for backwards compatibility + REQUIRED="" + if [ ! -z "${FIRSTWORKERID}" -a ! 
-z "${LASTWORKERID}" ]; then + REQUIRED=$(seq ${FIRSTWORKERID} ${LASTWORKERID}) + fi + REQUIRED="${REQUIRED} ${WORKERIDS//,/ }" + + ## Get the list of actual node names + HOSTS=$(system host-list ${CLI_NOWRAP} | awk '{if ($6 == "worker") print $4}') + + ## Build the list of required node names that are present + RESULT="" + for ID in ${REQUIRED}; do + if [[ "${HOSTS} " == *worker-${ID}[^0-9]* ]]; then + RESULT="worker-${ID} ${RESULT}" + fi + done + + echo $RESULT +} + +## Requery the worker node names if the config file specified first and last worker id +if [ ! -z "${FIRSTWORKERID}" -a ! -z "${LASTWORKERID}" -o ! -z "${WORKERIDS}" ]; then + NODES=$(get_node_list) +fi + +DATE_FORMAT="%Y-%m-%d %T" + +## Executes a command and logs the output +function log_command { + local CMD=$1 + local MSG="[${OS_USERNAME}@${OS_PROJECT_NAME}]> RUNNING: ${CMD}" + + set +e + if [ ${VERBOSE_LEVEL} -gt 0 ]; then + echo ${MSG} + fi + echo $(date +"${DATE_FORMAT}") ${MSG} >> ${LOG_FILE} + + if [ ${VERBOSE_LEVEL} -gt 1 ]; then + eval ${CMD} 2>&1 | tee -a ${LOG_FILE} + RET=${PIPESTATUS[0]} + else + eval ${CMD} &>> ${LOG_FILE} + RET=$? + fi + + if [ ${RET} -ne 0 ]; then + info "COMMAND FAILED (rc=${RET}): ${CMD}" + info "===========================" + info "Check \"${LOG_FILE}\" for more details, and re-run the failed" + info "command manually before contacting the domain owner for assistance." 
+ exit 1 + fi + set -e + + return ${RET} +} + +## Log a message to screen if verbose enabled +function log { + local MSG="$1" + + if [ ${VERBOSE_LEVEL} -gt 1 ]; then + echo ${MSG} + fi + echo $(date +"${DATE_FORMAT}") ${MSG} >> ${LOG_FILE} +} + +## Log a message to screen if debug enabled +function debug { + local MSG="$1" + + if [ ${DEBUG_LEVEL} -ge 1 ]; then + echo ${MSG} + fi + echo $(date +"${DATE_FORMAT}") ${MSG} >> ${LOG_FILE} +} + +## Log a message to screen and file +function info { + local MSG="$1" + + echo ${MSG} + echo $(date +"${DATE_FORMAT}") ${MSG} >> ${LOG_FILE} +} + +## Log a message to file and stdout +function log_warning { + local MSG="WARNING: $1" + + echo ${MSG} + echo $(date +"${DATE_FORMAT}") ${MSG} >> ${LOG_FILE} +} + + +function wait_for_service_parameters { + info "Waiting for controller-0 configuration apply" + + DELAY=0 + sleep 10 + while [ $DELAY -lt ${SERVICE_APPLY_TIMEOUT} ]; do + STATUS=$(get_host_config_status "controller-0") + if [ "${STATUS}" == "None" ]; then + # Wait an additional amount to allow the neutron-server to restart + # and be able to accept API requests. 
+ sleep 45 + break + fi + DELAY=$((DELAY + 5)) + sleep 5 + done +} + +## Retrieve the status for a host configuration +function get_host_config_status { + local NAME=$1 + echo $(system host-show ${NAME} 2>/dev/null | awk 'BEGIN { FS="|" } { if ($2 ~ /config_status/) {print $3} }'|xargs echo) +} + + +## Retrieve the interface profile uuid for a profile name +function get_ifprofile_id { + local NAME=$1 + echo $(system ifprofile-list ${CLI_NOWRAP} | grep -E "[0-9a-z]{8}-" | grep -E "${NAME}[^-_0-9a-zA-Z]" | awk '{print $2}') +} + +## Retrieve the CPU profile uuid for a profile name +function get_cpuprofile_id { + local NAME=$1 + echo $(system cpuprofile-list ${CLI_NOWRAP} | grep -E "[0-9a-z]{8}-" | grep -E "${NAME}[^-_0-9a-zA-Z]" | awk '{print $2}') +} + +## Retrieve the storage profile uuid for a profile name +function get_storprofile_id { + local NAME=$1 + echo $(system storprofile-list ${CLI_NOWRAP} | grep -E "[0-9a-z]{8}-" | grep -E "${NAME}[^-_0-9a-zA-Z]" | awk '{print $2}') +} + +## Retrieve the address pool uuid for a pool name +function get_addrpool_id { + local NAME=$1 + echo $(system addrpool-list ${CLI_NOWRAP} | grep -E -- "${NAME}" | awk '{print $2}') +} + +## Retrieve the image id for a volume name +function get_cinder_id { + local NAME=$1 + echo $(cinder ${REGION_OPTION} show ${NAME} 2>/dev/null | awk '{ if ($2 == "id") {print $4} }') +} + +## Retrieve the status for a volume name +function get_cinder_status { + local NAME=$1 + echo $(cinder ${REGION_OPTION} show ${NAME} 2>/dev/null | awk '{ if ($2 == "status") {print $4} }') +} + +## Retrieve the router id for a router name +function get_router_id { + local NAME=$1 + echo $(openstack ${REGION_OPTION} router show ${NAME} -c id -f value 2>/dev/null) +} + +## Retrieve the tenant id for a tenant name +function get_tenant_id { + local NAME=$1 + echo $(openstack project show ${NAME} -c id 2>/dev/null | grep id | awk '{print $4}') +} + +## Retrieve the user id for a user name +function get_user_id { + local 
NAME=$1 + echo $(openstack user show ${NAME} -c id 2>/dev/null | grep id | awk '{print $4}') +} + +## Retrieve the user roles for a tenant and user +function get_user_roles { + local TENANT=$1 + local USERNAME=$2 + echo $(openstack role list --project ${TENANT} --user ${USERNAME} 2>/dev/null | grep ${USERNAME} | awk '{print $4}') +} + +## Build the network name for a given network instance. For backwards +## compatibility with existing testcases the first network instance has no +## number in its' name. +function get_mgmt_network_name { + local PREFIX=$1 + local NUMBER=$2 + + local NAME=${PREFIX} + if [ ${NUMBER} -ne 0 ]; then + NAME=${NAME}${NUMBER} + fi + + echo ${NAME} + return 0 +} + +## Retrieve the network id for a network name +function get_network_id { + local NAME=$1 + echo $(openstack ${REGION_OPTION} network show ${NAME} -c id -f value 2>/dev/null) +} + +## Retrieve the network MTU for a network id +function get_network_mtu { + local ID=$1 + echo $(openstack ${REGION_OPTION} network show ${ID} -c mtu -f value 2>/dev/null) +} + +## Get the fixed ip on the tenant network +function get_network_ip_address { + local NETNUMBER=$1 + local HOSTNUMBER=$2 + local TENANTNUM=$3 + ## tenant1 gets VM addresses: 172.16.*.1, 172.16.*.3, 172.16.*.5 + ## tenant1 gets Ixia addresses: 172.16.*.2, 172.16.*.4, 172.16.*.6 + ## tenant2 gets VM addresses: 172.18.*.1, 172.18.*.3, 172.18.*.5 + ## tenant2 gets Ixia addresses: 172.18.*.2, 172.18.*.4, 172.18.*.6 + echo "172.$((16 + ${TENANTNUM} * 2)).${NETNUMBER}.$((1 + (${HOSTNUMBER} * 2)))" +} + +## construct the nova boot arg for a fixed ip on the tenant network +function get_network_ip { + if [ "x${ROUTED_TENANT_NETWORKS}" == "xyes" ]; then + ## Doesn't matter since the VM will be bridging traffic and not routing + echo "" + else + echo ",v4-fixed-ip=$(get_network_ip_address $1 $2 $3)" + fi +} + +## Retrieve the subnet id for a subnet name +function get_subnet_id { + local NAME=$1 + echo $(openstack ${REGION_OPTION} subnet show 
${NAME} -c id -f value 2>/dev/null) +} + +## Retrieve the providernet id for a providernet name +function get_provider_network_id { + local NAME=$1 + echo $(openstack ${REGION_OPTION} providernet show ${NAME} -c id -f value 2>/dev/null) +} + +## Retrieve the datanetwork uuid for a datanetwork name +function get_data_network_uuid { + local NAME=$1 + echo $(system datanetwork-list | grep "${NAME}" | awk '{print $2}') +} + +## Retrieve the providernet id for a providernet name +function get_provider_network_range_id { + local NAME=$1 + echo $(openstack ${REGION_OPTION} providernet range show ${NAME} -c id -f value 2>/dev/null) +} + +## Retrieve the qos id for a qos name +function get_qos_id { + local NAME=$1 + echo $(openstack ${REGION_OPTION} qos show ${NAME} -c id -f value 2>/dev/null) +} + +## Retrieve the flavor id for a flavor name +function get_flavor_id { + local NAME=$1 + echo $(nova ${REGION_OPTION} flavor-show ${NAME} 2>/dev/null | grep "| id" | awk '{print $4}') +} + +## Retrieve a specific neutron quota value for a tenant +function get_neutron_quota { + local TENANTID=$1 + local QUOTA=$2 + + echo $(openstack ${REGION_OPTION} quota show ${TENANTID} -c ${QUOTA} -f value 2>/dev/null) +} + +## Retrieve the hosts ip address of managment network +function get_mgmt_ip { + local ID=$1 + echo $(system host-show ${ID} 2>/dev/null | grep "mgmt_ip" | awk '{print $4}') +} + +## Retrieve the numa node flavor modifier for a given tenant +function get_flavor_modifier { + local TENANTNUM=$1 + local VMCOUNTER=$2 + + if [ "${TENANTNUM}" -gt ${#TENANTNODES[@]} ]; then + echo "" + elif [ "${TENANTNODES[${TENANTNUM}]}" == "split" ]; then + local NODE=$(($((VMCOUNTER-1)) % ${NUMA_NODE_COUNT})) + echo "node${NODE}" + elif [ ! -z "${TENANTNODES[${TENANTNUM}]}" ]; then + echo "${TENANTNODES[${TENANTNUM}]}" + else + echo "" + fi + + return 0 +} + +## Retrieve the network id of the network to be used between two tenant VM +## instances. 
If SHARED_TENANT_NETWORKS is set to "yes" then it is assumed +## that only a single VM is running and instead of using one of the actual +## internal networks the VM should use the other VM's tenant network to return +## traffic to ixia. +# +function get_internal_network_id { + local TENANTNUM=$1 + local NETNUMBER=$2 + local OTHER_TENANTNET="${TENANTS[$((1 - ${TENANTNUM}))]}-net" + local NET=0 + + if [ "x${SHARED_TENANT_NETWORKS}" == "xyes" ]; then + INTERNALNETID=$(get_network_id ${OTHER_TENANTNET}${NETNUMBER}) + else + if [ "x${REUSE_NETWORKS}" != "xyes" ]; then + NET=$((NETNUMBER / ${MAXVLANS})) + fi + INTERNALNETID=$(get_network_id ${INTERNALNET}${NET}) + fi + + echo ${INTERNALNETID} + return 0 +} + +function get_internal_network_name { + local TENANTNUM=$1 + local NETNUMBER=$2 + local OTHER_TENANTNET="${TENANTS[$((1 - ${TENANTNUM}))]}-net" + local NET=0 + + if [ "x${SHARED_TENANT_NETWORKS}" == "xyes" ]; then + echo ${OTHER_TENANTNET}${NETNUMBER} + else + if [ "x${REUSE_NETWORKS}" != "xyes" ]; then + NET=$((NETNUMBER / ${MAXVLANS})) + fi + echo ${INTERNALNET}${NET} + fi + + return 0 +} + +## Retrieve the speaker id for a speaker name +function get_bgp_speaker_id { + local NAME=$1 + echo $(neutron bgp-speaker-show ${NAME} -F id 2>/dev/null | grep id | awk '{print $4}') +} + +## Retrieve the peer id for a peer name +function get_bgp_peer_id { + local NAME=$1 + echo $(neutron bgp-peer-show ${NAME} -F id 2>/dev/null | grep id | awk '{print $4}') +} + +## Retrieve the bgpvpn id for a bgpvpn name +function get_bgp_vpn_id { + local NAME=$1 + echo $(neutron bgpvpn-show ${NAME} -F id 2>/dev/null | grep id | awk '{print $4}') +} + +## Retrieve the value of a prefixed variable name. If a variable named +## ${PREFIX}_${NAME} does not exist then a variable with DEFAULT_${NAME} is +## used. 
+## +function get_variable { + local PREFIX=$1 + local NAME=$2 + + ## No specific AEMODE for this interface, try the type-specific value + local VARNAME=${PREFIX^^}_${NAME^^} + if [ -z ${!VARNAME+x} ]; then + ## No type-specific value, use the global default + VARNAME=DEFAULT_${NAME^^} + if [ -z ${!VARNAME+x} ]; then + echo "Missing variable ${VARNAME}" + exit 2 + fi + VALUE=${!VARNAME} + else + VALUE=${!VARNAME} + fi + + echo $VALUE +} + + +## Retrieve the value of a worker node overridden variable. First the +## variable ${WORKER0_VARNAME} is checked and if no variable exists then +## ${VARNAME} is returned. If ${VARNAME} also does not exist then an empty +## value is returned. +## +function get_node_variable { + local NODE=${1/-/} + local VARNAME=${NODE^^}_$2 + + if [ -z "${!VARNAME+x}" ]; then + # There is no node specific variable available use the default + VARNAME=$2 + fi + + if [ -z "${!VARNAME+x}" ]; then + # There is no variable with the requested name so return an empty + # string + echo "" + else + # Dereference the variable name and echo its' contents + echo ${!VARNAME} + fi + + return 0 +} + +## Generates the neutron provider args for an internal network +function get_internal_provider_network { + DATA=(${INTERNALPNET//|/ }) + local PNET_TYPE=${DATA[0]} + local PNET_NAME=group${GROUPNO}-${DATA[1]} + if [ ${PNET_TYPE} == 'vxlan' ]; then + echo "--provider-network-type=${PNET_TYPE}" + else + echo "--provider-network-type=${PNET_TYPE} --provider-physical-network=${PNET_NAME}" + fi +} + +## Generates the neutron provider args for the external network +function get_external_provider_network { + DATA=(${EXTERNALPNET//|/ }) + local PNET_TYPE=${DATA[0]} + local PNET_NAME=group${GROUPNO}-${DATA[1]} + local SEGMENT_ID=${DATA[2]} + local PNET_ARGS="--provider-network-type=${PNET_TYPE} --provider-physical-network=${PNET_NAME}" + + if [ ${PNET_TYPE} == 'vxlan' ]; then + PNET_ARGS="--provider-network-type=${PNET_TYPE}" + fi + if [ ${SEGMENT_ID} != 'none' ]; then + 
PNET_ARGS="${PNET_ARGS} --provider-segment=${SEGMENT_ID}" + fi + + echo ${PNET_ARGS} +} + + +## Generates the list of provider networks for data interface +function get_provider_networks { + local NODE=$1 + local IFACE=$2 + + if [ ${PROVIDERNET_TYPE} != "mixed" ]; then + if [ ${IFACE} == "data0" ]; then + echo ${PROVIDERNET0} + elif [ ${IFACE} == "data1" ]; then + echo ${PROVIDERNET1} + else + echo "error" + fi + else + if [ ${IFACE} == "data0" ]; then + echo "${PROVIDERNET0},${PROVIDERNET0}-vxlan" + elif [ ${IFACE} == "data1" ]; then + echo ${PROVIDERNET1} + else + echo "error" + fi + fi +} + + +## Rudimentary check to determine if an address is IPv4 +## +function is_ipv4 { + local VALUE=$1 + + [[ ${VALUE} =~ ^[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}$ ]] + if [ $? -eq 0 ]; then + return 1 + else + return 0 + fi +} + +## Query the "ipv4-mode" attribute of an interface +## +function get_interface_ipv4_mode { + local NODE=$1 + local IFACE=$2 + local MODE="" + + echo $(system host-if-show ${NODE} ${IFNAME} | grep "ipv4_mode" | awk '{ print $4 }') + return 0 +} + +## Query the "ipv6-mode" attribute of an interface +## +function get_interface_ipv6_mode { + local NODE=$1 + local IFACE=$2 + local MODE="" + + echo $(system host-if-show ${NODE} ${IFNAME} | grep "ipv6_mode" | awk '{ print $4 }') + return 0 +} + +# Retrieve the port name by PCI address and device +function get_port_name { + local NODE=$1 + local PCIADDR=$2 + local PCIDEV=$3 + + PCIMATCH="${PCIADDR} | ${PCIDEV}" + echo $(system host-port-list ${NODE} ${CLI_NOWRAP} | grep "${PCIMATCH}" | awk '{print $4}') +} + +## Retrieve the interface uuid that is associated to the port specified by the +## PCI address and device provided. 
+## +function get_interface_uuid { + local NODE=$1 + local PCIADDR=$2 + local PCIDEV=$3 + + PCIMATCH="${PCIADDR} | ${PCIDEV}" + DATA=$(system host-port-list ${NODE} ${CLI_NOWRAP} | grep "${PCIMATCH}" | awk '{printf "%s %s", $2, $4}') + DATA=(${DATA//=/ }) + NAME=${DATA[1]} + UUID=${DATA[0]} + + ID=`system host-if-list -a ${NODE} ${CLI_NOWRAP} | awk -v PORT=[u\'${NAME}\'] '($12 == PORT) {print $2}'` + echo ${ID} +} + +## Derive the guest NIC device type based on the VM type +function get_guest_nic_device { + local VMTYPE=$1 + + if [ "$VMTYPE" == "vswitch" ]; then + NIC_DEVICE="${PCI_VENDOR_VIRTIO}:${PCI_DEVICE_MEMORY}:${PCI_SUBDEVICE_AVP}" + elif [ "$VMTYPE" == "vhost" ]; then + NIC_DEVICE="${PCI_VENDOR_VIRTIO}:${PCI_DEVICE_VIRTIO}:${PCI_SUBDEVICE_NET}" + else + NIC_DEVICE="" + fi + + echo "${NIC_DEVICE}" + return 0; +} + +## Determine whether one of the setup stages has already completed +function is_stage_complete { + local STAGE=$1 + local NODE=${2:-""} + local FILE="" + + if [ "${FORCE}" == "no" ]; then + if [ -z "${NODE}" ]; then + FILE=${GROUP_STATUS}.${STAGE} + else + FILE=${GROUP_STATUS}.${NODE}.${STAGE} + fi + if [ -f ${FILE} ]; then + return 0 + fi + fi + + return 1 +} + +## Mark a stage as complete +function stage_complete { + local STAGE=$1 + local NODE=${2:-""} + local FILE="" + + if [ -z "${NODE}" ]; then + FILE=${GROUP_STATUS}.${STAGE} + else + FILE=${GROUP_STATUS}.${NODE}.${STAGE} + fi + + touch ${FILE} + log "Stage complete: ${STAGE}" + + return 0 +} + + +## Set the system name and description +## +function set_system_name { + local NAME=$1 + local DESCRIPTION="${NAME}: setup by lab_setup.sh" + + info "Setting system name" + source ${OPENRC} + log_command "system modify --name=${NAME} --description=\"${DESCRIPTION}\"" + RET=$? 
+ return ${RET} +} + + +## Set the system vswitch type +## +function set_vswitch_type { + local TYPE=$1 + + if is_stage_complete "vswitch_type"; then + info "Skipping vswitch type configuration; already done" + return 0 + fi + + info "Setting vswitch type" + source ${OPENRC} + + CURRENT=$(system show|grep vswitch_type|awk '{print $4}') + if [ $VSWITCH_TYPE != $CURRENT ]; then + log_command "system modify --vswitch_type ${TYPE}" + RET=$? + fi + stage_complete "vswitch_type" + return ${RET} +} + +## Set the DNS configuration +## +function set_dns { + if is_stage_complete "dns"; then + info "Skipping DNS configuration; already done" + return 0 + fi + info "Setting DNS configuration" + source ${OPENRC} + log_command "system dns-modify nameservers=${NAMESERVERS} action=apply" + RET=$? + stage_complete "dns" + return ${RET} +} + +## Set the NTP/PTP configuration +## +function set_time_service { + RET=0 + if is_stage_complete "time"; then + info "Skipping ${TIMESERVICE} configuration; already done" + return ${RET} + fi + info "Setting ${TIMESERVICE} configuration" + source ${OPENRC} + if [ "x${TIMESERVICE}" == "xNTP" ]; then + log_command "system ntp-modify ntpservers=${NTPSERVERS}" + RET=$? + stage_complete "time" + elif [ "x${TIMESERVICE}" == "xPTP" ]; then + log_command "system ntp-modify --enabled=false" + log_command "system ptp-modify --enabled=true" + RET=$? + stage_complete "time" + else + info "Unsupported time service type: ${TIMESERVICE}" + RET=1 + fi + return ${RET} +} + +## Check for prerequisite files +## +function check_required_files { + mkdir -p ${USERDATA_DIR} + mkdir -p ${BOOTDIR} + + if [ ! -f ${OPENRC} ]; then + echo "Nova credential file is missing: ${OPENRC}" + return 1 + fi + + if [ ${GROUPNO} -eq 0 ]; then + if [ ! -L ${DEFAULT_BOOTDIR} ]; then + ln -s ${BOOTDIR} ${DEFAULT_BOOTDIR} + fi + fi + + return 0 +} + +## Increment an IP address by an arbitrary amount. 
There is no elegant way of +## doing this in bash, that handles both ipv4 and ipv6, without installing +## other packages so python is used. +function ip_incr { + local IPADDRESS=$1 + local VALUE=$2 + + python -c "import netaddr;print netaddr.IPAddress('${IPADDRESS}')+${VALUE}" +} + +## Add tenants and users +function add_tenants { + local TENANTID=0 + + if is_stage_complete "tenants"; then + info "Skipping tenant configuration; already done" + return 0 + fi + + info "Adding tenants" + + for TENANT in ${TENANTS[@]}; do + ## Create the project if it does not exist + TENANTID=$(get_tenant_id ${TENANT}) + if [ -z "${TENANTID}" ]; then + log_command "openstack project create \ +--domain ${OPENSTACK_PROJECT_DOMAIN} --description ${TENANT} ${TENANT}" + fi + + ## Create the user if it does not exist + USERID=$(get_user_id ${TENANT}) + if [ -z "${USERID}" ]; then + log_command "openstack user create \ +--password ${DEFAULT_OPENSTACK_PASSWORD} --domain ${OPENSTACK_USER_DOMAIN} --project ${TENANT} \ +--project-domain ${OPENSTACK_PROJECT_DOMAIN} \ +--email ${TENANT}@noreply.com ${TENANT}" + fi + + ## Ensure tenant user is a member of the project + ROLES=$(get_user_roles ${TENANT} ${TENANT}) + if [[ ! $ROLES =~ ${MEMBER_ROLE} ]]; then + log_command "openstack role add --project ${TENANT} \ +--project-domain ${OPENSTACK_PROJECT_DOMAIN} --user ${TENANT} \ +--user-domain ${OPENSTACK_USER_DOMAIN} ${MEMBER_ROLE}" + fi + + ## Add the admin user to each group/project so that all projects can be + ## accessed from Horizon without needing to logout/login again. + ROLES=$(get_user_roles ${TENANT} ${ADMIN_USER}) + if [[ ! 
$ROLES =~ ${MEMBER_ROLE} ]]; then + log_command "openstack role add --project ${TENANT} \ +--project-domain ${OPENSTACK_PROJECT_DOMAIN} --user ${ADMIN_USER} \ +--user-domain ${OPENSTACK_USER_DOMAIN} ${MEMBER_ROLE}" + fi + done + + stage_complete "tenants" + + return 0 +} + +## Create credential files for tenants +## +function create_credentials { + local TENANT="" + local ADMIN_URL="35357" + local PUBLIC_URL="5000" + local K8S_URL="http://keystone.openstack.svc.cluster.local/v3" + + if is_stage_complete "credentials"; then + info "Skipping credential configuration; already done" + return 0 + fi + + info "Adding tenant credentials" + + for TENANT in ${TENANTS[@]}; do + cp ${OPENRC} ${HOME}/openrc.${TENANT} + sed -i -e "s#admin#${TENANT}#g" ${HOME}/openrc.${TENANT} + sed -i -e "s#\(OS_PASSWORD\)=.*#\1=${DEFAULT_OPENSTACK_PASSWORD}#g" ${HOME}/openrc.${TENANT} + sed -i -e "s#${ADMIN_URL}#${PUBLIC_URL}#g" ${HOME}/openrc.${TENANT} + if [ "$K8S_ENABLED" == "yes" ]; then + sed -i -e "s#\(OS_AUTH_URL\)=.*#\1=${K8S_URL}#g" ${HOME}/openrc.${TENANT} + fi + done + + if [ "$K8S_ENABLED" == "yes" ]; then + cp ${OPENRC} ${HOME}/openrc.admin + sed -i -e "s#\(OS_AUTH_URL\)=.*#\1=${K8S_URL}#g" ${HOME}/openrc.admin + fi + + stage_complete "credentials" + + return 0 +} + + +function set_cinder_quotas { + if is_stage_complete "quotas_cinder"; then + info "Skipping cinder quota configuration; already done" + return 0 + fi + + info "Adding cinder quotas" + unset OS_AUTH_URL + export OS_AUTH_URL=${K8S_URL} + for TENANT in ${TENANTS[@]}; do + TENANTID=$(get_tenant_id ${TENANT}) + log_command "openstack ${REGION_OPTION} quota set ${TENANTID} \ +--volumes ${VOLUME_QUOTA} \ +--snapshots ${SNAPSHOT_QUOTA}" + done + + stage_complete "quotas_cinder" +} + +## Adjust quotas +## +function set_quotas { + local SQUOTA=0 + local NQUOTA=0 + local PQUOTA=0 + local RAMPARAM="" + + if is_stage_complete "quotas"; then + info "Skipping quota configuration; already done" + return 0 + fi + + info "Adding 
quotas" + + ## Set the Admin quotas. + NQUOTA=$(get_neutron_quota ${ADMINID} "network") + if [ "x${NQUOTA}" != "x${ADMIN_NETWORK_QUOTA}" -a ${GROUPNO} -eq 0 ]; then + log_command "openstack ${REGION_OPTION} quota set ${ADMINID} --networks ${ADMIN_NETWORK_QUOTA}" + fi + SQUOTA=$(get_neutron_quota ${ADMINID} "subnet") + if [ "x${SQUOTA}" != "x${ADMIN_SUBNET_QUOTA}" -a ${GROUPNO} -eq 0 ]; then + log_command "openstack ${REGION_OPTION} quota set ${ADMINID} --subnets ${ADMIN_SUBNET_QUOTA}" + fi + PQUOTA=$(get_neutron_quota ${ADMINID} "port") + if [ "x${PQUOTA}" != "x${ADMIN_PORT_QUOTA}" -a ${GROUPNO} -eq 0 ]; then + log_command "openstack ${REGION_OPTION} quota set ${ADMINID} --ports ${ADMIN_PORT_QUOTA}" + fi + + # Only set RAM quota if value is specified + if [ ! -z "${RAM_QUOTA}" ]; then + RAMPARAM="--ram ${RAM_QUOTA}" + fi + + for TENANT in ${TENANTS[@]}; do + TENANTID=$(get_tenant_id ${TENANT}) + NQUOTA=$(get_neutron_quota ${TENANTID} "network") + SQUOTA=$(get_neutron_quota ${TENANTID} "subnet") + PQUOTA=$(get_neutron_quota ${TENANTID} "port") + ## Setup neutron quota (if necessary) + if [ "x${NQUOTA}" != "x${NETWORK_QUOTA}" -o "x${SQUOTA}" != "x${SUBNET_QUOTA}" -o "x${PQUOTA}" != "x${PORT_QUOTA}" ]; then + log_command "openstack ${REGION_OPTION} quota set ${TENANTID} \ +--subnets ${SUBNET_QUOTA} \ +--networks ${NETWORK_QUOTA} \ +--ports ${PORT_QUOTA} \ +--floating-ips ${FLOATING_IP_QUOTA}" + fi + + ## Setup nova quotas + log_command "nova ${REGION_OPTION} quota-update ${TENANTID} \ +--instances ${INSTANCE_QUOTA} \ +--cores ${CORE_QUOTA} \ +${RAMPARAM}" + + done + + ## Prevent the admin from launching VMs + log_command "nova ${REGION_OPTION} quota-update ${ADMINID} --instances 0 --cores 0" + log_command "openstack ${REGION_OPTION} quota set ${ADMINID} --floating-ips 0" + + stage_complete "quotas" +} + +## Sets up a single flat data network with no ranges +## +function setup_flat_data_network { + local INDEX=$1 + local NAME=group${GROUPNO}-$2 + local MTU=$3 + + local 
TRANSPARENT_ARGS="--vlan-transparent ${VLAN_TRANSPARENT}" + if [ "${VLAN_TRANSPARENT_INTERNAL_NETWORKS}" == "True" ]; then + TRANSPARENT_ARGS="--vlan-transparent=True" + fi + + log_command "system ${REGION_OPTION} datanetwork-add ${NAME} flat -m ${MTU}" + + return 0 +} + +## Sets up a single VLAN data network with range details +## +function setup_vlan_data_network { + local INDEX=$1 + local NAME=group${GROUPNO}-$2 + local MTU=$3 + local RANGES=$4 + local OWNER=$5 + + local TRANSPARENT_ARGS="--vlan-transparent ${VLAN_TRANSPARENT}" + if [ "${VLAN_TRANSPARENT_INTERNAL_NETWORKS}" == "True" ]; then + TRANSPARENT_ARGS="--vlan-transparent=True" + fi + + + DNETUUID=$(get_data_network_uuid ${NAME}) + if [ -z "${DNETUUID}" ]; then + log_command "system ${REGION_OPTION} datanetwork-add ${NAME} vlan -m ${MTU}" + fi + + return 0 +} + +## Adds a set of internal segments ranges alternating between VXLAN_GROUP and +## VXLAN_PORT values according with VXLAN_INTERNAL_STEP segments in each +## range. This is to facilitate having different group addresses and +## potentially each group could be either IPv4 or IPv6 +## +function setup_vxlan_provider_network_ranges { + local INDEX=$1 + local NAME=$2 + local OWNER_ARGS="$3" + local RANGES=(${4//,/ }) + local GROUPS_VARNAME=$5 + local PORTS_VARNAME=$6 + local VXLAN_ARGS="" + local GROUP_IDX=0 + local PORT_IDX=0 + + local MCAST_GROUPS=(${!GROUPS_VARNAME}) + local UDP_PORTS=(${!PORTS_VARNAME}) + + VALUES=${MCAST_GROUPS[@]} + debug "vxlan provider ranges with ${#MCAST_GROUPS[@]} group attributes: ${VALUES}" + VALUES=${UDP_PORTS[@]} + debug "vxlan provider ranges with ${#UDP_PORTS[@]} attributes: ${VALUES}" + + local COUNT=0 + for I in ${!RANGES[@]}; do + local RANGE=${RANGES[${I}]} + RANGE=(${RANGE/-/ }) + + ## Start at the beginning for each new range + GROUP_IDX=0 + PORT_IDX=0 + + for J in $(seq ${RANGE[0]} ${VXLAN_INTERNAL_STEP} ${RANGE[1]}); do + local RANGE_NAME=${NAME}-r${INDEX}-${COUNT} + + RANGEID=$(get_provider_network_range_id 
${RANGE_NAME}) + if [ ! -z "${RANGEID}" ]; then + ## already exists + continue + fi + + VXLAN_ARGS="--port ${UDP_PORTS[${PORT_IDX}]} --ttl ${VXLAN_TTL}" + GROUP_ARG="${MCAST_GROUPS[${GROUP_IDX}]}" + if [[ "${GROUP_ARG}" == @(static|evpn) ]]; then + VXLAN_ARGS="${VXLAN_ARGS} --mode ${GROUP_ARG}" + else + VXLAN_ARGS="${VXLAN_ARGS} --group ${GROUP_ARG}" + fi + + END=$((J+${VXLAN_INTERNAL_STEP}-1)) + END=$((${END} < ${RANGE[1]} ? ${END} : ${RANGE[1]})) + log_command "openstack ${REGION_OPTION} providernet range create ${NAME} --name ${RANGE_NAME} ${OWNER_ARGS} --range ${J}-${END} ${VXLAN_ARGS}" + + GROUP_IDX=$(((GROUP_IDX + 1) % ${#MCAST_GROUPS[@]})) + PORT_IDX=$(((PORT_IDX + 1) % ${#UDP_PORTS[@]})) + + COUNT=$((COUNT+1)) + done + done + + return 0 +} + +## Sets up a single VXLAN provider network with range details and VXLAN +## attributes. The GROUPS and PORTS arguments are optional and if not +## specified will be taken from the global variable VXLAN_GROUPS or +## VXLAN_PORTS. +## +function setup_vxlan_provider_network { + local INDEX=$1 + local NAME=group${GROUPNO}-$2 + local MTU=$3 + local RANGES=$4 + local OWNER=$5 + set +u + local MCAST_GROUPS=(${6//,/ }) + local UDP_PORTS=(${7//,/ }) + local TTL=$8 + set -u + + local TRANSPARENT_ARGS="--vlan-transparent ${VLAN_TRANSPARENT}" + if [ "${VLAN_TRANSPARENT_INTERNAL_NETWORKS}" == "True" ]; then + TRANSPARENT_ARGS="--vlan-transparent=True" + fi + + PNETID=$(get_provider_network_id ${NAME}) + if [ -z "${PNETID}" ]; then + log_command "openstack ${REGION_OPTION} providernet create ${NAME} --type vxlan --mtu ${MTU} ${TRANSPARENT_ARGS}" + fi + + OWNER_ARGS="--shared" + if [ "${OWNER}" != "shared" ]; then + OWNER_ARGS="--project $(get_tenant_id ${OWNER})" + fi + + if [ -z "${MCAST_GROUPS+x}" ]; then + ## Use global defaults + MCAST_GROUPS=(${VXLAN_GROUPS}) + fi + + if [ -z "${UDP_PORTS+x}" ]; then + ## Use global defaults + UDP_PORTS=(${VXLAN_PORTS}) + fi + + if [ -z "${TTL}" ]; then + ## Use global default + 
TTL=${VXLAN_TTL} + fi + + if [ ${#MCAST_GROUPS[@]} -gt 1 -o ${#UDP_PORTS[@]} -gt 1 ]; then + ## Setup smaller ranges with varying groups and ports + setup_vxlan_provider_network_ranges ${INDEX} ${NAME} "${OWNER_ARGS}" ${RANGES} MCAST_GROUPS[@] UDP_PORTS[@] + else + ## Setup just a single set of ranges each with the same VXLAN attributes + VALUES=${MCAST_GROUPS[@]} + debug "vxlan provider ranges with group attributes: ${VALUES}" + VALUES=${UDP_PORTS[@]} + debug "vxlan provider ranges with attributes: ${VALUES}" + + RANGES=(${RANGES//,/ }) + for I in ${!RANGES[@]}; do + local RANGE=${RANGES[${I}]} + local RANGE_NAME=${NAME}-r${INDEX}-${I} + + RANGEID=$(get_provider_network_range_id ${RANGE_NAME}) + if [ ! -z "${RANGEID}" ]; then + ## already exists + return 0 + fi + + VXLAN_ARGS="--ttl ${TTL} --port ${UDP_PORTS[0]}" + GROUP_ARG="${MCAST_GROUPS[0]}" + if [[ "${GROUP_ARG}" == @(static|evpn) ]]; then + VXLAN_ARGS="${VXLAN_ARGS} --mode ${GROUP_ARG}" + else + VXLAN_ARGS="${VXLAN_ARGS} --group ${GROUP_ARG}" + fi + log_command "openstack ${REGION_OPTION} providernet range create ${NAME} --name ${RANGE_NAME} --range ${RANGE} ${OWNER_ARGS} ${VXLAN_ARGS}" + done + fi + + return 0 +} + +## Sets up a single VXLAN provider network with range details and VXLAN +## attributes. The GROUPS and PORTS arguments are optional and if not +## specified will be taken from the global variable VXLAN_GROUPS or +## VXLAN_PORTS. 
+## +function setup_vxlan_data_network { + local INDEX=$1 + local NAME=group${GROUPNO}-$2 + local MTU=$3 + local RANGES=$4 + local OWNER=$5 + set +u + local MCAST_GROUPS=(${6//,/ }) + local UDP_PORTS=(${7//,/ }) + local TTL=$8 + set -u + + local TRANSPARENT_ARGS="--vlan-transparent ${VLAN_TRANSPARENT}" + if [ "${VLAN_TRANSPARENT_INTERNAL_NETWORKS}" == "True" ]; then + TRANSPARENT_ARGS="--vlan-transparent=True" + fi + + + OWNER_ARGS="--shared" + if [ "${OWNER}" != "shared" ]; then + OWNER_ARGS="--project $(get_tenant_id ${OWNER})" + fi + + if [ -z "${MCAST_GROUPS+x}" ]; then + ## Use global defaults + MCAST_GROUPS=(${VXLAN_GROUPS}) + fi + + if [ -z "${UDP_PORTS+x}" ]; then + ## Use global defaults + UDP_PORTS=(${VXLAN_PORTS}) + fi + + if [ -z "${TTL}" ]; then + ## Use global default + TTL=${VXLAN_TTL} + fi + + DNETUUID=$(get_data_network_uuid ${NAME}) + if [ -z "${DNETUUID}" ]; then + DNVXLAN_ARGS="-p ${UDP_PORTS[0]} -g ${MCAST_GROUPS[0]} -t ${TTL}" + DNMODE_ARG="${MCAST_GROUPS[0]}" + if [[ "${DNMODE_ARG}" == "static" ]]; then + DNVXLAN_ARGS="-p ${UDP_PORTS[0]} -t ${TTL} -M static" + fi + log_command "system ${REGION_OPTION} datanetwork-add ${NAME} vxlan -m ${MTU} ${DNVXLAN_ARGS}" + fi + + return 0 +} +## Loops over all PROVIDERNETS entries and creates each provider network +## according to its attributes. 
##
## Parses the global PROVIDERNETS list ("type|args..." entries separated by
## whitespace) and dispatches each entry to the matching
## setup_<type>_data_network helper.  Idempotent via the "datanetworks"
## stage marker.
##
function add_provider_networks {
    local PNETS=(${PROVIDERNETS})

    source ${OPENRC}

    if is_stage_complete "datanetworks"; then
        info "Skipping provider networks; already done"
        return 0
    fi

    info "Adding data networks"

    for IFINDEX in ${!PNETS[@]}; do
        ENTRY=${PNETS[${IFINDEX}]}
        ## Split "type|field|field..." on '|'
        DATA=(${ENTRY//|/ })
        TYPE=${DATA[0]}

        debug "setting up providernet(${IFINDEX}): ${ENTRY}"

        ## Remove the type from the array
        unset DATA[0]
        DATA=(${DATA[@]})

        if [ "${TYPE}" == "vxlan" ]; then
            setup_vxlan_data_network ${IFINDEX} "${DATA[@]}"

        elif [ "${TYPE}" == "vlan" ]; then
            setup_vlan_data_network ${IFINDEX} "${DATA[@]}"

        elif [ "${TYPE}" == "flat" ]; then
            setup_flat_data_network ${IFINDEX} "${DATA[@]}"

        else
            echo "unsupported data network type: ${TYPE}"
            return 1
        fi

        ## $? here is the exit status of the last command run inside the
        ## if/elif chain, i.e. the setup_* call that was dispatched above.
        RET=$?
        if [ ${RET} -ne 0 ]; then
            echo "Failed to setup data network(${IFINDEX}): ${ENTRY}"
            return ${RET}
        fi
    done

    stage_complete "datanetworks"

    return 0
}

## Retrieve the network segment range id for a segment range name.
## Prints an empty string (stderr suppressed) when the range does not exist.
function get_network_segment_range_id {
    local RANGE_NAME=$1
    echo $(openstack ${REGION_OPTION} network segment range show ${RANGE_NAME} -c id -f value 2>/dev/null)
}

## Adds a set of internal segments ranges alternating between VXLAN_GROUP and
## VXLAN_PORT values according with VXLAN_INTERNAL_STEP segments in each
## range.  This is to facilitate having different group addresses and
## potentially each group could be either IPv4 or IPv6
##
## Arguments:
##   $1 - INDEX used to build unique range names
##   $2 - provider/data network name
##   $3 - owner arguments (passed through to the CLI)
##   $4 - comma separated "min-max" ranges
##   $5 - name of the array variable holding multicast groups (indirect ref)
##   $6 - name of the array variable holding UDP ports (indirect ref)
##
function setup_vxlan_network_segment_ranges_varied {
    local INDEX=$1
    local NAME=$2
    local OWNER_ARGS="$3"
    local RANGES=(${4//,/ })
    local GROUPS_VARNAME=$5
    local PORTS_VARNAME=$6
    local GROUP_IDX=0
    local PORT_IDX=0

    ## Indirect expansion of the caller's "NAME[@]" array references
    local MCAST_GROUPS=(${!GROUPS_VARNAME})
    local UDP_PORTS=(${!PORTS_VARNAME})

    VALUES=${MCAST_GROUPS[@]}
    debug "vxlan provider ranges with ${#MCAST_GROUPS[@]} group attributes: ${VALUES}"
    VALUES=${UDP_PORTS[@]}
    debug "vxlan provider ranges with ${#UDP_PORTS[@]} attributes: ${VALUES}"

    local COUNT=0
    for I in ${!RANGES[@]}; do
        local RANGE=${RANGES[${I}]}
        ## Split "min-max" into RANGE[0]=min, RANGE[1]=max
        RANGE=(${RANGE/-/ })

        ## Start at the beginning for each new range
        GROUP_IDX=0
        PORT_IDX=0

        for J in $(seq ${RANGE[0]} ${VXLAN_INTERNAL_STEP} ${RANGE[1]}); do
            local RANGE_NAME=${NAME}-r${INDEX}-${COUNT}

            RANGEID=$(get_network_segment_range_id ${RANGE_NAME})
            if [ ! -z "${RANGEID}" ]; then
                ## already exists
                continue
            fi

            ## Clamp the sub-range end to the overall range end
            END=$((J+${VXLAN_INTERNAL_STEP}-1))
            END=$((${END} < ${RANGE[1]} ? ${END} : ${RANGE[1]}))

            log_command "openstack ${REGION_OPTION} network segment range create ${RANGE_NAME} --network-type vxlan --minimum ${J} --maximum ${END} ${OWNER_ARGS}"

            ## NOTE(review): the group/port arrays and the IDX counters below
            ## no longer influence the created range — the VXLAN attribute
            ## arguments were dropped when moving from the providernet
            ## command (kept commented out below) to segment ranges.
            # log_command "openstack ${REGION_OPTION} providernet range create ${NAME} --name ${RANGE_NAME} ${OWNER_ARGS} --range ${J}-${END} ${VXLAN_ARGS}"

            GROUP_IDX=$(((GROUP_IDX + 1) % ${#MCAST_GROUPS[@]}))
            PORT_IDX=$(((PORT_IDX + 1) % ${#UDP_PORTS[@]}))

            COUNT=$((COUNT+1))
        done
    done

    return 0
}

## Sets up a single VXLAN provider network with range details and VXLAN
## attributes.  The GROUPS and PORTS arguments are optional and if not
## specified will be taken from the global variable VXLAN_GROUPS or
## VXLAN_PORTS.
##
## Creates the network segment range(s) for a VXLAN network.  MTU and the
## group/port/TTL arguments are accepted for signature compatibility with
## the providernet-era helpers; segment range creation itself only uses the
## min/max bounds and the owner arguments.
##
function setup_vxlan_network_segment_ranges {
    local INDEX=$1
    local NAME=group${GROUPNO}-$2
    local MTU=$3
    local RANGES=$4
    local OWNER=$5
    ## $6..$8 may be absent; relax nounset while capturing them
    set +u
    local MCAST_GROUPS=(${6//,/ })
    local UDP_PORTS=(${7//,/ })
    local TTL=$8
    set -u

    OWNER_ARGS="--shared"
    if [ "${OWNER}" != "shared" ]; then
        OWNER_ARGS="--private --project $(get_tenant_id ${OWNER})"
    fi

    if [ -z "${MCAST_GROUPS+x}" ]; then
        ## Use global defaults
        MCAST_GROUPS=(${VXLAN_GROUPS})
    fi

    if [ -z "${UDP_PORTS+x}" ]; then
        ## Use global defaults
        UDP_PORTS=(${VXLAN_PORTS})
    fi

    if [ -z "${TTL}" ]; then
        ## Use global default
        TTL=${VXLAN_TTL}
    fi

    if [ ${#MCAST_GROUPS[@]} -gt 1 -o ${#UDP_PORTS[@]} -gt 1 ]; then
        ## Setup smaller ranges with varying groups and ports
        setup_vxlan_network_segment_ranges_varied ${INDEX} ${NAME} "${OWNER_ARGS}" ${RANGES} MCAST_GROUPS[@] UDP_PORTS[@]
    else
        ## Setup just a single set of ranges each with the same VXLAN attributes
        VALUES=${MCAST_GROUPS[@]}
        debug "vxlan provider ranges with group attributes: ${VALUES}"
        VALUES=${UDP_PORTS[@]}
        debug "vxlan provider ranges with attributes: ${VALUES}"

        RANGES=(${RANGES//,/ })
        for I in ${!RANGES[@]}; do
            local RANGE=${RANGES[${I}]}
            ## Split "min-max" on the dash
            local RANGE_MIN=$(echo "${RANGE}" | cut -f1 -d-)
            local RANGE_MAX=$(echo "${RANGE}" | cut -f2 -d-)
            local RANGE_NAME=${NAME}-r${INDEX}-${I}

            RANGEID=$(get_network_segment_range_id ${RANGE_NAME})
            if [ ! -z "${RANGEID}" ]; then
                ## already exists
                ## NOTE(review): returns rather than continuing, so later
                ## ranges are skipped once one exists — confirm intended.
                return 0
            fi

            log_command "openstack ${REGION_OPTION} network segment range create ${RANGE_NAME} --network-type vxlan --minimum ${RANGE_MIN} --maximum ${RANGE_MAX} ${OWNER_ARGS}"

        done
    fi

    return 0
}

## Sets up a single VLAN data network with range details
##
## Arguments:
##   $1 - INDEX used to build unique range names
##   $2 - base name (prefixed with "group${GROUPNO}-")
##   $3 - MTU (unused by segment range creation)
##   $4 - comma separated "min-max" ranges
##   $5 - OWNER ("shared" or a tenant name)
##
function setup_vlan_network_segment_ranges {
    local INDEX=$1
    local NAME=group${GROUPNO}-$2
    local MTU=$3
    local RANGES=$4
    local OWNER=$5

    OWNER_ARGS="--shared"
    if [ "${OWNER}" != "shared" ]; then
        OWNER_ARGS="--private --project $(get_tenant_id ${OWNER})"
    fi

    RANGES=(${RANGES//,/ })
    for I in ${!RANGES[@]}; do
        local RANGE=${RANGES[${I}]}
        local RANGE_NAME=${NAME}-r${INDEX}-${I}
        ## Split "min-max" on the dash
        local RANGE_MIN=$(echo "${RANGE}" | cut -f1 -d-)
        local RANGE_MAX=$(echo "${RANGE}" | cut -f2 -d-)

        RANGEID=$(get_network_segment_range_id ${RANGE_NAME})
        if [ ! -z "${RANGEID}" ]; then
            ## already exists
            ## NOTE(review): returns rather than continuing, so later
            ## ranges are skipped once one exists — confirm intended.
            return 0
        fi

        log_command "openstack ${REGION_OPTION} network segment range create ${RANGE_NAME} --network-type vlan --physical-network ${NAME} --minimum ${RANGE_MIN} --maximum ${RANGE_MAX} ${OWNER_ARGS}"

    done

    return 0
}

## Loops over all PROVIDERNETS entries and creates each
## network segment range according to its attributes.
##
## Parses the global PROVIDERNETS list and dispatches each entry to the
## matching setup_<type>_network_segment_ranges helper.  Flat networks have
## no segment ranges and are skipped.  Idempotent via the
## "network_segment_ranges" stage marker.
##
function add_network_segment_ranges {
    local PNETS=(${PROVIDERNETS})

    source ${OPENRC}
    ## Segment ranges are managed through the containerized (K8S) neutron
    unset OS_AUTH_URL
    export OS_AUTH_URL=${K8S_URL}


    if is_stage_complete "network_segment_ranges"; then
        info "Skipping network segment ranges; already done"
        return 0
    fi

    info "Adding network segment ranges"

    for IFINDEX in ${!PNETS[@]}; do
        ENTRY=${PNETS[${IFINDEX}]}
        ## Split "type|field|field..." on '|'
        DATA=(${ENTRY//|/ })
        TYPE=${DATA[0]}

        debug "setting up network segment ranges (${IFINDEX}): ${ENTRY}"

        ## Remove the type from the array
        unset DATA[0]
        DATA=(${DATA[@]})

        if [ "${TYPE}" == "vxlan" ]; then
            setup_vxlan_network_segment_ranges ${IFINDEX} "${DATA[@]}"

        elif [ "${TYPE}" == "vlan" ]; then
            setup_vlan_network_segment_ranges ${IFINDEX} "${DATA[@]}"

        elif [ "${TYPE}" == "flat" ]; then
            ## flat networks carry no segment ranges
            continue

        else
            echo "unsupported data network type: ${TYPE}"
            return 1
        fi

        ## $? is the status of the setup_* call dispatched above
        RET=$?
        if [ ${RET} -ne 0 ]; then
            echo "Failed to setup network segment range (${IFINDEX}): ${ENTRY}"
            return ${RET}
        fi
    done

    stage_complete "network_segment_ranges"

    return 0
}

## Setup network qos policies
##
## Creates the external and internal QoS policies plus one management QoS
## policy per tenant.  Idempotent via get_qos_id checks and the "qos" stage
## marker.
##
function add_qos_policies {
    local TENANTID=""
    local MGMTQOS=""
    local ID=""

    source ${OPENRC}
    if [ "$K8S_ENABLED" == "yes" ]; then
        unset OS_AUTH_URL
        export OS_AUTH_URL=${K8S_URL}
    fi

    if is_stage_complete "qos"; then
        info "Skipping QoS configuration; already done"
        return 0
    fi

    info "Adding network qos policies"

    ID=$(get_qos_id ${EXTERNALQOS})
    if [ -z "${ID}" ]; then
        log_command "openstack ${REGION_OPTION} qos create --name ${EXTERNALQOS} --description \"External Network Policy\" --scheduler weight=${EXTERNALQOSWEIGHT}"
    fi

    ID=$(get_qos_id ${INTERNALQOS})
    if [ -z "${ID}" ]; then
        log_command "openstack ${REGION_OPTION} qos create --name ${INTERNALQOS} --description \"Internal Network Policy\" --scheduler weight=${INTERNALQOSWEIGHT}"
    fi

    for TENANT in ${TENANTS[@]}; do
        TENANTID=$(get_tenant_id ${TENANT})
        MGMTQOS="${TENANT}-mgmt-qos"
        ID=$(get_qos_id ${MGMTQOS})
        if [ -z "${ID}" ]; then
            log_command "openstack ${REGION_OPTION} qos create --name ${MGMTQOS} --description \"${TENANT} Management Network Policy\" --project ${TENANTID} --scheduler weight=${MGMTQOSWEIGHT}"
        fi
    done

    stage_complete "qos"

    return 0
}

## provider network names are specified without their group prefix so add the
## prefix before passing any provider network lists to system commands.
##
## $1 is a comma separated list; prints the same list with each entry
## prefixed by "group${GROUPNO}-".
##
function resolve_provider_networks {
    local PNETS=(${1//,/ })
    ## Prefix every element, then re-join with commas (strip leading comma)
    local NAMES=(${PNETS[@]/#/group${GROUPNO}-})
    NAMES=$(printf ",%s" "${NAMES[@]}")
    PNETS=${NAMES:1}
    echo ${PNETS}
}

## Map a lab interface "type" keyword to the sysinv interface class.
## Prints an empty string for unrecognized types.
function get_class_from_type {
    local TYPE=$1
    local CLASS=""
    if [[ "${TYPE}" == @(pxeboot|mgmt|infra|oam|cluster-host) ]]; then
        CLASS="platform"
    elif [ "${TYPE}" == "data" ]; then
        CLASS="data"
    elif [ "${TYPE}" == "pthru" ]; then
        ## Use sysinv type value
        CLASS="pci-passthrough"
    elif [ "${TYPE}" == "sriov" ]; then
        ## Use sysinv type value
        CLASS="pci-sriov"
    elif [ "${TYPE}" == "none" ]; then
        CLASS="none"
    fi
    echo ${CLASS}
}

## Sets up a single VLAN interface over a lower interface.
##
## Arguments:
##   $1 NODE, $2 IFINDEX, $3 TYPE, $4 DEVICES (lower ifname or PCI address
##   "0000:..[+dev]"), $5 MTU, $6 PNETS (comma list or "none"), $7 VLANID.
##
function setup_vlan_interface {
    local NODE=$1
    local IFINDEX=$2
    local TYPE=$3
    local DEVICES=$4
    local MTU=$5
    local PNETS=$6
    local VLANID=$7
    local LOWER_IFNAME=""
    local IFNAME="vlan${VLANID}"

    ## Check for a matching name
    local DATA=($(system host-if-show ${NODE} ${IFNAME} 2> /dev/null | awk '($2 == "uuid" || $2 == "iftype") {print $4}'))
    if [ ! -z "${DATA+x}" ]; then
        if [ "${DATA[0]}" == "vlan" ]; then
            ## Already exists
            return 0
        fi
    fi

    if [ -z "${VLANID}" ]; then
        echo "VLANID not specified for interface(${IFINDEX}) ${DEVICES}"
        return 1
    fi

    CLASS=$(get_class_from_type ${TYPE})
    ## Check for a matching type + vlanid since on small systems the infra vlan may already exist
    IFACEID=$(system host-if-list ${NODE} ${CLI_NOWRAP} | awk -v CLASS=${CLASS} -v VLANID=${VLANID} '{if ($6 == CLASS && $10 == VLANID) print $2}')
    if [ ! -z "${IFACEID}" ]; then
        ## Already exists
        return 0
    fi

    local PNET_ARGS=""
    if [[ "${TYPE}" == data && "${PNETS}" != "none" ]]; then
        PNETS=$(resolve_provider_networks ${PNETS})
        PNET_ARGS="${PNETS}"
    fi

    local MTU_ARGS=""
    if [[ "${TYPE}" == data && -n "${MTU}" ]]; then
        MTU_ARGS="-m ${MTU}"
    fi

    if [[ "${DEVICES}" != "0000:"* ]]; then
        ## Specified as a lower interface name; nothing to do
        LOWER_IFNAME=${DEVICES}

    else
        ## Specified as PCIADDR.  Find the port name and interface name
        local PCIINFO=(${DEVICES//+/ })
        local PCIADDR=${PCIINFO[0]}
        local PCIDEV=${PCIINFO[1]:-"0"}

        PORTNAME=$(get_port_name ${NODE} ${PCIADDR} ${PCIDEV})
        if [ -z "${PORTNAME}" ]; then
            echo "Failed to find port name for ${PCIADDR} ${PCIDEV} on ${NODE}"
            return 1
        fi

        LOWER_IFNAME=$(system host-if-list -a ${NODE} ${CLI_NOWRAP} | awk -v PORTNAME=[u\'$PORTNAME\'] '($12 == PORTNAME) {print $4}')
        if [ -z "${LOWER_IFNAME}" ]; then
            echo "Failed to find interface name for port ${PORTNAME}"
            return 1
        fi
    fi

    ## The lower interface MTU must be at least the VLAN MTU
    local LOWER_MTU=$(system host-if-show ${NODE} ${LOWER_IFNAME} | awk '($2 == "imtu") {print $4}')
    if [ -z "${LOWER_MTU}" ]; then
        echo "Interface ${LOWER_IFNAME} not found\n"
        return 1
    fi
    if [ ${LOWER_MTU} -lt ${MTU} ]; then
        log_command "system host-if-modify ${NODE} ${LOWER_IFNAME} -m ${MTU}"
    fi

    if [ ${CLASS} == "platform" ]; then
        ## platform interfaces are also bound to their sysinv network
        NET_ID=`system network-list |grep ${TYPE} | awk '{print $4}'`
        log_command "system host-if-add ${MTU_ARGS} -V ${VLANID} -c ${CLASS} --networks ${NET_ID} ${NODE} ${IFNAME} vlan ${PNET_ARGS} ${LOWER_IFNAME}"
    else
        log_command "system host-if-add ${MTU_ARGS} -V ${VLANID} -c ${CLASS} ${NODE} ${IFNAME} vlan ${PNET_ARGS} ${LOWER_IFNAME}"
    fi

    return 0
}


## Sets up a single ethernet interface as a data or infra type interface
##
## DEVICES ($4) may be a port name ("eth..."), a PCI address ("0000:..."),
## or an existing interface name.  For sriov, the first extra argument is
## the number of VFs.
##
function setup_ethernet_interface {
    local NODE=$1
    local IFINDEX=$2
    local TYPE=$3
    local DEVICES=$4
    local MTU=$5
    local PNETS=$6

    shift 6
    ARGS=($@)

    local PORTNAME=""
    ## Interface names are limited; truncate the type portion to 7 chars
    local IFNAME="${TYPE:0:7}${IFINDEX}"
    #IFNAME=($(echo ${IFNAME:0:10}))

    local DATA=($(system host-if-show ${NODE} ${IFNAME} 2> /dev/null | awk '($2 == "uuid" || $2 == "iftype") {print $4}'))
    if [ ! -z "${DATA+x}" ]; then
        if [ "${DATA[0]}" == "ethernet" ]; then
            ## Already exists
            return 0
        fi
    fi

    if [[ "${TYPE}" == @(pxeboot|mgmt) ]]; then
        echo "Creating mgmt or pxeboot ethernet interfaces is not supported"
        return 1
    fi

    local PNET_ARGS=""
    if [[ "${TYPE}" == @(data|pthru|sriov) ]]; then
        if [ "${PNETS}" != "none" ]; then
            PNETS=$(resolve_provider_networks ${PNETS})
            PNET_ARGS="-p ${PNETS}"
        fi
    fi

    local VF_ARGS=""
    if [ "${TYPE}" == "sriov" ]; then
        ## number of virtual functions
        VF_ARGS="-N ${ARGS[0]}"
    fi

    local MTU_ARGS=""
    if [[ "${TYPE}" == @(data|pthru|sriov) ]]; then
        if [ ! -z "${MTU}" ]; then
            MTU_ARGS="-m ${MTU}"
        fi
    fi

    if [[ "${DEVICES}" == "eth"* ]]; then
        ## Specified as a port name; nothing to do
        PORTNAME=${DEVICES}

    elif [[ "${DEVICES}" == "0000:"* ]]; then
        ## Specified as a PCIADDR; convert to a port name
        local PCIINFO=(${DEVICES//+/ })
        local PCIADDR=${PCIINFO[0]}
        local PCIDEV=${PCIINFO[1]:-"0"}

        PORTNAME=$(get_port_name ${NODE} ${PCIADDR} ${PCIDEV})
        if [ -z "${PORTNAME}" ]; then
            echo "Failed to find port name for ${PCIADDR} ${PCIDEV} on ${NODE}"
            return 1
        fi
    else
        ## Specified as an interface name (e.g., mgmt0, pxeboot0, etc...)
        IFNAME=${DEVICES}
    fi

    ## Get the interface ID and class
    if [ ! -z "${PORTNAME}" ]; then
        ## ... by port name
        local DATA=($(system host-if-list -a ${NODE} ${CLI_NOWRAP} | awk -v PORTNAME=[u\'${PORTNAME}\'] '($12 == PORTNAME) {printf "%s %s\n", $2, $6}'))
    else
        ## ... by interface name
        local DATA=($(system host-if-list -a ${NODE} ${CLI_NOWRAP} | awk -v IFNAME=${IFNAME} '($4 == IFNAME) {printf "%s %s\n", $2, $6}'))
    fi

    local IFACEID=${DATA[0]}
    local CLASS=${DATA[1]}
    if [ -z "${IFACEID}" -o -z "${CLASS}" ]; then
        echo "Failed to get interface id (${IFACEID}) or class (${CLASS}) for ${DEVICES}"
        return 1
    fi

    ## Replace the discovered class with the one implied by TYPE
    CLASS=$(get_class_from_type ${TYPE})
    if [ ${CLASS} == "platform" ]; then
        ## platform interfaces are also bound to their sysinv network
        NET_ID=`system network-list |grep ${TYPE} | awk '{print $4}'`
        log_command "system host-if-modify ${MTU_ARGS} -n ${IFNAME} ${VF_ARGS} ${PNET_ARGS} -c ${CLASS} --networks ${NET_ID} ${NODE} ${IFACEID}"
    else
        log_command "system host-if-modify ${MTU_ARGS} -n ${IFNAME} ${VF_ARGS} ${PNET_ARGS} -c ${CLASS} ${NODE} ${IFACEID}"
    fi

    return 0
}


## Sets up a single AE ethernet interface as a data, infra, or mgmt type interface
##
## DEVICES ($4) is a comma separated list of one or two members, each a port
## name or a PCI address.  AEMODE/AEHASH are optional and default to the
## per-type globals via get_variable.
##
function setup_ae_interface {
    local NODE=$1
    local IFINDEX=$2
    local TYPE=$3
    local DEVICES=(${4//,/ })
    local MTU=$5
    local PNETS=$6
    ## $7/$8 may be absent; relax nounset while capturing them
    set +u
    local AEMODE=$7
    local AEHASH=$8
    set -u
    #local IFNAME="${TYPE}${IFINDEX}"
    local IFNAME="${TYPE:0:7}${IFINDEX}"

    local DATA=($(system host-if-show ${NODE} ${IFNAME} 2> /dev/null | awk '($2 == "uuid" || $2 == "iftype") {print $4}'))
    if [ ! -z "${DATA+x}" ]; then
        if [ "${DATA[0]}" == "ae" ]; then
            ## Already exists
            return 0
        elif [ "${TYPE}" == "pxeboot" -a "${DATA[0]}" == "ethernet" ]; then
            ## Already exists as the default ethernet pxeboot interface; remove
            ## the upper mgmt VLAN interface before continuing (lab_cleanup.sh
            ## is capable of restoring this if this was not intentional)
            log_warning "Removing existing mgmt0 VLAN interfaces"
            log_command "system host-if-delete ${NODE} mgmt0"
        fi
    fi

    if [ -z "${AEMODE}" ]; then
        ## Get global default value
        AEMODE=$(get_variable ${TYPE} AEMODE)
    fi

    if [ -z "${AEHASH}" ]; then
        ## Get global default value
        AEHASH=$(get_variable ${TYPE} AEHASH)
    fi

    local PNET_ARGS="none"
    if [ "${TYPE}" == "data" ]; then
        PNETS=$(resolve_provider_networks ${PNETS})
        PNET_ARGS="${PNETS}"
    elif [ "${TYPE}" == "bond" ]; then
        ## Special case to be able to create an AE strictly for later placing
        ## VLAN interfaces on top of it.
        TYPE="none"
        PNET_ARGS="none"
    fi

    local MTU_ARGS=""
    if [[ "${TYPE}" == @(pxeboot|data) ]]; then
        if [ ! -z "${MTU}" ]; then
            MTU_ARGS="-m ${MTU}"
        fi
    fi

    if [[ "${DEVICES[0]}" != "0000:"* ]]; then
        ## Specified as port names; nothing to do
        PORT0NAME=${DEVICES[0]}
        PORT1NAME=${DEVICES[1]:-""}

    else
        ## Convert to port names
        PCIINFO=(${DEVICES[0]//+/ })
        PCI0ADDR=${PCIINFO[0]}
        PCI0DEV=${PCIINFO[1]:-"0"}

        PORT0NAME=$(get_port_name ${NODE} ${PCI0ADDR} ${PCI0DEV})
        if [ -z "${PORT0NAME}" ]; then
            echo "Failed to find port name for ${PCI0ADDR} ${PCI0DEV} on ${NODE}"
            return 1
        fi

        PORT1NAME=""
        if [ ! -z "${DEVICES[1]+x}" ]; then
            PCIINFO=(${DEVICES[1]//+/ })
            PCI1ADDR=${PCIINFO[0]}
            PCI1DEV=${PCIINFO[1]:-"0"}

            PORT1NAME=$(get_port_name ${NODE} ${PCI1ADDR} ${PCI1DEV})
            if [ -z "${PORT1NAME}" ]; then
                echo "Failed to find port name for ${PCI1ADDR} ${PCI1DEV} on ${NODE}"
                return 1
            fi
        fi
    fi

    ## Convert to interface names and make sure their network type is None
    local DATA0IFNAME=$(system host-if-list -a ${NODE} ${CLI_NOWRAP} | awk -v PORT0NAME=[u\'$PORT0NAME\'] '($12 == PORT0NAME) {print $4}')
    log_command "system host-if-modify ${NODE} ${DATA0IFNAME} -n ${PORT0NAME} -c none"
    DATA0IFNAME=${PORT0NAME}

    local DATA1IFNAME=""
    if [ ! -z "${PORT1NAME}" ]; then
        DATA1IFNAME=$(system host-if-list -a ${NODE} ${CLI_NOWRAP} | awk -v PORT1NAME=[u\'$PORT1NAME\'] '($12 == PORT1NAME) {print $4}')
        log_command "system host-if-modify ${NODE} ${DATA1IFNAME} -n ${PORT1NAME} -c none"
        DATA1IFNAME=${PORT1NAME}
    fi

    ## Create the AE interface
    local AE_ARGS="-a ${AEMODE}"
    if [ "${AEMODE}" != "active_standby" ]; then
        ## hash policy only applies to balanced modes
        AE_ARGS="${AE_ARGS} -x ${AEHASH}"
    fi
    CLASS=$(get_class_from_type ${TYPE})
    if [ ${CLASS} == "platform" ]; then
        ## platform interfaces are also bound to their sysinv network
        NET_ID=`system network-list |grep ${TYPE} | awk '{print $4}'`
        log_command "system host-if-add ${MTU_ARGS} ${AE_ARGS} -c ${CLASS} --networks ${NET_ID} ${NODE} ${IFNAME} ae ${PNET_ARGS} ${DATA0IFNAME} ${DATA1IFNAME}"
    else
        log_command "system host-if-add ${MTU_ARGS} ${AE_ARGS} -c ${CLASS} ${NODE} ${IFNAME} ae ${PNET_ARGS} ${DATA0IFNAME} ${DATA1IFNAME}"

    fi
    return 0
}


## Setup data interfaces on a single node
##
## Reads the per-node <TYPE>_INTERFACES variable ("iftype|args..." entries)
## and dispatches each entry to the matching setup_*_interface helper.
##
function setup_interfaces {
    local NODE=$1
    local TYPE=$2
    ## variable names use '_' where the type uses '-' (e.g. cluster-host)
    local NETTYPE=${TYPE//-/_}
    local INTERFACES=($(get_node_variable ${NODE} ${NETTYPE^^}_INTERFACES))
    local IFINDEX_SHIFT=0

    if [ -z "${INTERFACES+x}" ]; then
        return 0
    fi

    debug "  Adding ${TYPE} interfaces for ${NODE}"

    if [ "${TYPE}" == "bond" ]; then
        ## Get bond AE interfaces number and use it as IFINDEX shift
        IFINDEX_SHIFT=$(system host-if-list ${NODE} | awk '($6 != "None" && $8 == "ae" && $4 ~ /^bond/) {print}'| wc -l)
    fi

    for IFINDEX in ${!INTERFACES[@]}; do
        local ENTRY=${INTERFACES[${IFINDEX}]}
        local DATA=(${ENTRY//|/ })
        local IFTYPE=${DATA[0]}

        debug "setting up ${TYPE} interface(${IFINDEX}) on ${NODE}: ${ENTRY}"

        ## Remove the IFTYPE from the array
        unset DATA[0]
        DATA=(${DATA[@]})

        if [ "${IFTYPE}" == "vlan" ]; then
            setup_vlan_interface ${NODE} ${IFINDEX} ${TYPE} "${DATA[@]}"

        elif [ "${IFTYPE}" == "ae" ]; then
            setup_ae_interface ${NODE} $((IFINDEX+IFINDEX_SHIFT)) ${TYPE} "${DATA[@]}"

        elif [ "${IFTYPE}" == "ethernet" ]; then
            setup_ethernet_interface ${NODE} ${IFINDEX} ${TYPE} "${DATA[@]}"

        else
            echo "unsupported ${TYPE} interface type: ${IFTYPE}"
            return 1
        fi

        ## $? is the status of the setup_* call dispatched above
        RET=$?
        if [ ${RET} -ne 0 ]; then
            echo "Failed to ${TYPE} data interface ${IFINDEX}, ${IFTYPE} ${DEVICES}"
            return ${RET}
        fi
    done

    return 0
}

#change memory settings on a virtual worker node
# Reserves platform memory (-m) and 2M hugepages on numa nodes 0 and 1 of
# every node in ${NODES}.  Idempotent via the "memory" stage marker.
function set_reserved_memory {
    local TYPES=""
    local NODE=""

    for NODE in ${NODES}; do
        if is_stage_complete "memory" ${NODE}; then
            info "Skipping memory configuration for ${NODE}; already done"
            continue
        fi

        info "Configuring memory on ${NODE}"
        log_command "system host-memory-modify -m 1200 -2M 256 ${NODE} 0"
        RET=$?
        ## NOTE(review): the error text below looks copy-pasted from the
        ## address-mode helpers and references ${IFNAME}, which this
        ## function never sets — confirm the intended message.
        if [ ${RET} -ne 0 ]; then
            echo "Failed to set addressing mode to static on ${NODE}:${IFNAME}"
            return ${RET}
        fi

        log_command "system host-memory-modify -m 1000 -2M 256 ${NODE} 1"
        RET=$?
        if [ ${RET} -ne 0 ]; then
            echo "Failed to set addressing mode to static on ${NODE}:${IFNAME}"
            return ${RET}
        fi

        stage_complete "memory" ${NODE}
    done

    return 0
}

#create flag file for virtual worker nodes
# Touches /etc/sysinv/.virtual_worker_nodes/<mgmt-ip> for every node in
# ${NODES}.  Idempotent via the "virtual_flags" stage marker.
function write_flag_file {
    local TYPES=""
    local NODE=""
    local VIRTDIR="/etc/sysinv/.virtual_worker_nodes"
    sudo test -d ${VIRTDIR} || sudo mkdir ${VIRTDIR}
    for NODE in ${NODES}; do
        if is_stage_complete "virtual_flags" ${NODE}; then
            info "Skipping Flag creation for ${NODE}; already done"
            continue
        fi
        info "Writing flag for ${NODE}"
        local IP=$(get_mgmt_ip ${NODE})
        echo ${IP}
        sudo touch ${VIRTDIR}/${IP}
        stage_complete "virtual_flags" ${NODE}
    done

    return 0
}

## Setup data interfaces on all worker nodes
##
## Chooses the interface type list per node role (storage / controller /
## worker) and runs setup_interfaces for each type.  On small systems
## controller-0's mgmt/infra/cluster-host are skipped since
## config_controller already created them.
##
function add_interfaces {
    local TYPES=""
    local NODE=""

    for NODE in ${NODES} ${CONTROLLER_NODES} ${STORAGE_NODES}; do
        if is_stage_complete "interfaces" ${NODE}; then
            info "Skipping interface configuration for ${NODE}; already done"
            continue
        fi

        info "Adding interfaces on ${NODE}"

        if [[ "${NODE}" == *"storage"* ]]; then
            TYPES="bond pxeboot mgmt infra cluster-host"
        elif [[ "${NODE}" == *"controller"* ]]; then
            if [ "${SMALL_SYSTEM}" == "yes" ]; then
                TYPES="bond pxeboot mgmt data infra pthru sriov oam cluster-host"
            else
                TYPES="bond pxeboot mgmt infra oam cluster-host"
            fi
        else
            TYPES="bond pxeboot mgmt data infra pthru sriov cluster-host"
        fi

        for TYPE in ${TYPES}; do
            if [[ "${SMALL_SYSTEM}" == "yes" && "${NODE}" == "controller-0" ]]; then
                if [[ "${TYPE}" == @(mgmt|infra|cluster-host) ]]; then
                    ## config_controller should have done these
                    continue
                fi
            fi

            setup_interfaces ${NODE} ${TYPE}
            RET=$?
            if [ ${RET} -ne 0 ]; then
                echo "Failed to setup ${TYPE} interfaces for ${NODE}"
                return ${RET}
            fi
        done

        stage_complete "interfaces" ${NODE}
    done

    return 0
}


## Before adding an IP address to a data interface the addressing mode must be
## set to 'static' for the correct address family.
##
## $3 is a comma separated list of "addr/prefix" entries; "na" entries are
## placeholders and are skipped.
##
function setup_data_static_address_modes {
    local NODE=$1
    local IFNAME=$2
    local IPLIST=$3
    local IPV4_MODE=""
    local IPV6_MODE=""

    IPARRAY=(${IPLIST//,/ })
    for IPADDR in ${IPARRAY[@]}; do
        ## Split "addr/prefix" on '/'
        IPFIELDS=(${IPADDR//\// })

        if [ ${IPFIELDS[0]} == "na" ]; then
            continue
        fi

        RET=0
        ## is_ipv4 signals its answer via exit status; tolerate non-zero
        set +e
        is_ipv4 ${IPFIELDS[0]}
        IS_IPV4=$?
        set -e
        if [ ${IS_IPV4} -eq 1 ]; then
            IPV4_MODE=$(get_interface_ipv4_mode ${NODE} ${IFNAME})
            if [ "${IPV4_MODE}" != "static" ]; then
                log_command "system host-if-modify ${NODE} ${IFNAME} --ipv4-mode=static"
                RET=$?
            fi
        else
            IPV6_MODE=$(get_interface_ipv6_mode ${NODE} ${IFNAME})
            if [ "${IPV6_MODE}" != "static" ]; then
                log_command "system host-if-modify ${NODE} ${IFNAME} --ipv6-mode=static"
                RET=$?
            fi
        fi

        if [ ${RET} -ne 0 ]; then
            echo "Failed to set addressing mode to static on ${NODE}:${IFNAME}"
            return ${RET}
        fi

    done

    return 0
}

## Setup IP addresses by taking a comma seperated list of route descriptors
## for a node + interface.
##
function setup_data_addresses {
    local NODE=$1
    local IFNAME=$2
    local IPLIST=$3

    debug "Setting up data addresses for ${IFNAME} on ${NODE}: ${IPLIST}"

    setup_data_static_address_modes ${NODE} ${IFNAME} ${IPLIST}
    RET=$?
    if [ ${RET} -ne 0 ]; then
        return ${RET}
    fi

    IPARRAY=(${IPLIST//,/ })
    for IPADDR in ${IPARRAY[@]}; do
        IPFIELDS=(${IPADDR//\// })

        if [ ${IPFIELDS[0]} == "na" ]; then
            continue
        fi

        ## NOTE(review): this split is a duplicate of the one above
        ## (harmless, same result)
        IPFIELDS=(${IPADDR//\// })
        ## skip addresses that are already configured on the interface
        ID=`system host-addr-list ${NODE} ${CLI_NOWRAP} | grep -E "${IFNAME}[^0-9].* ${IPFIELDS[0]}[^0-9]" | awk '{print $2}'`
        if [ -z "${ID}" ]; then
            log_command "system host-addr-add ${NODE} ${IFNAME} ${IPFIELDS[0]} ${IPFIELDS[1]}"
            RET=$?
            if [ ${RET} -ne 0 ]; then
                echo "Failed to add ${IPFIELDS[0]}/${IPFIELDS[1]} to ${IFNAME} on ${NODE}"
                return ${RET}
            fi
        fi
    done

    return 0
}

## Setup IP routes by taking a comma seperated list of route descriptors for a
## node + interface.
##
## Each descriptor is "network/prefix/gateway"; "na" entries are skipped.
##
function setup_data_routes {
    local NODE=$1
    local IFNAME=$2
    local ROUTELIST=$3
    local METRIC=1

    ## NOTE(review): message says "addresses" but this sets up routes —
    ## looks copy-pasted from setup_data_addresses.
    debug "Setting up data addresses for ${IFNAME} on ${NODE}: ${ROUTELIST}"

    ROUTEARRAY=(${ROUTELIST//,/ })
    for IPROUTE in ${ROUTEARRAY[@]}; do
        ## Split "network/prefix/gateway" on '/'
        IPFIELDS=(${IPROUTE//\// })

        if [ ${IPFIELDS[0]} == "na" ]; then
            continue
        fi

        ## skip routes that already exist for this interface
        ID=`system host-route-list ${NODE} ${CLI_NOWRAP}| grep -E "${IFNAME}[^0-9].* ${IPFIELDS[0]}[^0-9].* ${IPFIELDS[2]}[^0-9]" | awk '{print $2}'`
        if [ -z "${ID}" ]; then
            log_command "system host-route-add ${NODE} ${IFNAME} ${IPFIELDS[0]} ${IPFIELDS[1]} ${IPFIELDS[2]} ${METRIC}"
            RET=$?
            if [ ${RET} -ne 0 ]; then
                echo "Failed to add ${IPFIELDS[0]}/${IPFIELDS[1]} via ${IPFIELDS[2]} to ${IFNAME} on ${NODE}"
                return ${RET}
            fi
        fi
    done

    return 0
}


## Setup data pools to later be associated to data interfaces
##
## $1 is a whitespace separated list of "name|network|prefix|order|ranges"
## descriptors.  Idempotent via get_addrpool_id.
##
function setup_data_address_pools {
    local POOLS=$1

    for POOL in ${POOLS}; do
        local FIELDS=(${POOL//|/ })
        local NAME="group${GROUPNO}-${FIELDS[0]}"
        local NETWORK=${FIELDS[1]}
        local PREFIX=${FIELDS[2]}
        local ORDER=${FIELDS[3]}
        local RANGES=${FIELDS[4]}

        local ID=$(get_addrpool_id ${NAME})
        if [ -z "${ID}" ]; then
            log_command "system addrpool-add ${NAME} ${NETWORK} ${PREFIX} --order ${ORDER} --ranges ${RANGES}"
        fi

    done

    return 0
}


## Associate data address pools to data interfaces
## Sets the interface's ipv4/ipv6 mode to "pool" pointing at each pool,
## choosing the address family via is_ipv4 on the pool network.
function setup_data_address_pools_on_interface {
    local NODE=$1
    local IFNAME=$2
    local POOLS=$3
    local IPV4_MODE=""
    local IPV6_MODE=""

    for POOL in ${POOLS}; do
        local FIELDS=(${POOL//|/ })
        local NAME="group${GROUPNO}-${FIELDS[0]}"
        local NETWORK=${FIELDS[1]}

        RET=0
        ## is_ipv4 signals its answer via exit status; tolerate non-zero
        set +e
        is_ipv4 ${NETWORK}
        IS_IPV4=$?
        set -e

        if [ ${IS_IPV4} -eq 1 ]; then
            IPV4_MODE=$(get_interface_ipv4_mode ${NODE} ${IFNAME})
            if [ "${IPV4_MODE}" != "pool" ]; then
                log_command "system host-if-modify ${NODE} ${IFNAME} --ipv4-mode=pool --ipv4-pool=${NAME}"
                RET=$?
            fi
        else
            ## NOTE(review): compares against "static" while the v4 branch
            ## compares against "pool" — confirm this asymmetry is intended.
            IPV6_MODE=$(get_interface_ipv6_mode ${NODE} ${IFNAME})
            if [ "${IPV6_MODE}" != "static" ]; then
                log_command "system host-if-modify ${NODE} ${IFNAME} --ipv6-mode=pool --ipv6-pool=${NAME}"
                RET=$?
            fi
        fi

        if [ ${RET} -ne 0 ]; then
            echo "Failed to set addressing mode to pool on ${NODE}:${IFNAME}"
            return ${RET}
        fi

    done

    return 0
}


## Setup data interface IP addresses and routes on all worker nodes
##
## Only interfaces attached to a group VXLAN data network are processed;
## per-interface addresses/pools/routes come from the node-scoped
## <IFNAME>_IPADDRS / <IFNAME>_IPPOOLS / <IFNAME>_IPROUTES variables.
##
function add_data_addresses_and_routes {
    local NODE=""
    local ID=""

    VXLAN_PNETS=$(system ${REGION_OPTION} datanetwork-list | grep group${GROUPNO} | grep vxlan | awk '{print $4}')
    if [ -z "${VXLAN_PNETS}" ]; then
        ## No VXLAN provider networks exist; nothing to do
        return 0
    fi

    debug "found VXLAN provider networks: ${VXLAN_PNETS}"

    for NODE in ${NODES}; do
        ## Process all worker nodes

        if is_stage_complete "addresses" ${NODE}; then
            info "Skipping interface address configuration for ${NODE}; already done"
            continue
        fi

        info "Adding IP addresses and routes to data interfaces on ${NODE}"

        ## Produce "ifname|datanetworks" pairs for all real interfaces
        IFACES=$(system host-if-list ${NODE} ${CLI_NOWRAP} | grep -E "[a-z0-9]{8}-" | sed -e 's/[ \t]*//g' | awk -F\| '{printf "%s|%s\n", $3, $11}')

        info "Adding addresses: interfaces ${IFACES}"

        for ENTRY in ${IFACES}; do
            ## Look at all interfaces that have provider networks
            DATA=(${ENTRY//|/ })
            IFNAME=${DATA[0]}
            PNETS=${DATA[1]}

            if [ "${PNETS}" == "None" ]; then
                info "Adding PNETS None: PNETS ${PNETS}"
                continue
            fi

            ## Lookup the pool(s) associated to this interface
            ## (indirect variable reference; may be unset, hence +u)
            set +u
            VARNAME=${IFNAME^^}_IPPOOLS
            IPPOOLS=${!VARNAME}
            set -u
            if [ ! -z "${IPPOOLS}" ]; then
                setup_data_address_pools "${IPPOOLS}"
                RET=$?
                if [ ${RET} -ne 0 ]; then
                    return ${RET}
                fi
            fi

            ## This one has provider networks but exclude it if it doesn't have
            ## any VXLAN provider networks.
            for PNET in ${VXLAN_PNETS}; do
                if [[ "${PNETS}" != *"${PNET}"* ]]; then
                    ## This interface has no VXLAN provider networks
                    continue
                fi

                IPADDRS=$(get_node_variable ${NODE} ${IFNAME^^}_IPADDRS)
                if [ -z "${IPADDRS}" -a -z "${IPPOOLS}" ]; then
                    echo "No IP addresses defined for ${IFNAME} on ${NODE}"
                    return 1
                fi

                ## explicit addresses take precedence over pools
                if [ ! -z "${IPADDRS}" ]; then
                    setup_data_addresses ${NODE} ${IFNAME} ${IPADDRS}
                    RET=$?
                    if [ ${RET} -ne 0 ]; then
                        return ${RET}
                    fi
                elif [ ! -z "${IPPOOLS}" ]; then
                    setup_data_address_pools_on_interface ${NODE} ${IFNAME} "${IPPOOLS}"
                    RET=$?
                    if [ ${RET} -ne 0 ]; then
                        return ${RET}
                    fi
                fi

                IPROUTES=$(get_node_variable ${NODE} ${IFNAME^^}_IPROUTES)
                if [ -z "${IPROUTES}" ]; then
                    ## routes are optional
                    continue
                fi

                setup_data_routes ${NODE} ${IFNAME} ${IPROUTES}
                RET=$?
                if [ ${RET} -ne 0 ]; then
                    return ${RET}
                fi

                ## Do not repeat if there are multiple VXLAN datanetworks on
                ## this interface.
                break
            done
        done

        stage_complete "addresses" ${NODE}

    done

    return 0
}

## Setup custom firewall rules
##
## Installs the rules file named by FIREWALL_RULES_FILE.  Idempotent via
## the "firewall_rules" stage marker.
##
function setup_custom_firewall_rules {
    source ${OPENRC}

    if is_stage_complete "firewall_rules"; then
        info "Skipping custom firewall rules; already done"
        return 0
    fi

    info "Adding custom firewall rules"
    log_command "system firewall-rules-install ${FIREWALL_RULES_FILE}"
    RET=$?
    if [ ${RET} -ne 0 ]; then
        echo "Failed to install custom firewall rules ret=${RET}"
        return ${RET}
    fi

    stage_complete "firewall_rules"
    return 0
}

## Setup board management configuration for all nodes
##
## No-op unless BM_ENABLED=yes.  Nodes must be locked; nodes without the
## per-node BM_MACADDR/BM_USERNAME/BM_PASSWORD/BM_TYPE variables are
## skipped with a log message.
##
function add_board_management {
    if [ "x${BM_ENABLED}" != "xyes" ]; then
        return 0
    fi

    if is_stage_complete "bmc"; then
        info "Skipping board management configuration; already done"
        return 0
    fi

    info "Adding board management config on all nodes"

    ALL_NODES=$(system host-list ${CLI_NOWRAP} | awk '{if ($6=="worker" || $6=="controller") print $4;}')
    for NODE in ${ALL_NODES} ${STORAGE_NODES}; do
        STATE=$(system host-show ${NODE} | grep administrative | awk '{print $4}')
        if [ "x${STATE}" != "xlocked" ]; then
            log_warning "Node ${NODE} not locked; cannot set board management configuration"
            continue
        fi

        MACADDR=$(get_node_variable ${NODE} BM_MACADDR)
        if [ -z "${MACADDR}" ]; then
            ## not configured on this node
            continue
        fi

        USERNAME=$(get_node_variable ${NODE} BM_USERNAME)
        PASSWORD=$(get_node_variable ${NODE} BM_PASSWORD)
        if [ -z "${USERNAME}" -o -z "${PASSWORD}" ]; then
            log "Configuration for ${NODE} missing BM username and/or password"
            continue
        fi

        TYPE=$(get_node_variable ${NODE} BM_TYPE)
        if [ -z "${TYPE}" ]; then
            log "Configuration for ${NODE} missing BM type"
            continue
        fi

        log_command "system host-update ${NODE} bm_mac=${MACADDR} bm_type=${TYPE} bm_username=${USERNAME} bm_password=${PASSWORD}"
        RET=$?
        if [ ${RET} -ne 0 ]; then
            echo "Failed to set BM MAC, Type, Username, Password on ${NODE}, ret=${RET}"
            return ${RET}
        fi

    done

    stage_complete "bmc"

    return 0
}


## Setup vswitch CPUs on worker nodes
##
## Applies the per-node VSWITCH_PCPU_MAP ("numa:count,..."), defaulting to
## numa 0 with VSWITCH_PCPU cores.  Worker nodes must be locked.
##
function setup_vswitch_cpus {
    for NODE in ${NODES}; do
        if is_stage_complete "vswitch_cpus" ${NODE}; then
            info "Skipping vswitch CPU configuration for ${NODE}; already done"
            continue
        fi

        info "Adding vswitch CPU config on ${NODE}"

        DATA=($(system host-show ${NODE} | grep -E "personality|administrative" | awk '{print $4}'))
        STATE=${DATA[0]}
        PERSONALITY=${DATA[1]}
        if [ "x${PERSONALITY}" == "xworker" -a "x${STATE}" != "xlocked" ]; then
            log_warning "Node ${NODE} not locked; cannot set vswitch CPU configuration"
            continue
        fi

        # Use the supplied PCPU mapping, otherwise configure against the default numa node
        PCPU_MAP=$(get_node_variable ${NODE} VSWITCH_PCPU_MAP)
        if [ -z "${PCPU_MAP}" ]; then
            PCPU_MAP="0:$(get_node_variable ${NODE} VSWITCH_PCPU)"
        fi

        PCPU_ARRAY=(${PCPU_MAP//,/ })
        for PCPU in ${PCPU_ARRAY[@]}; do
            ## Split "numa:count"
            ARRAY=(${PCPU//:/ })
            NUMA=${ARRAY[0]}
            REQUIRED=${ARRAY[1]}

            ## count of CPUs already assigned to vSwitch on this numa node
            CURRENT=$(system host-cpu-list ${NODE} |grep vSwitch | awk -v numa="$NUMA" '$6==numa { ++count } BEGIN { count=0 } END { print count }')

            if [ ${CURRENT} -ne ${REQUIRED} ]; then
                log_command "system host-cpu-modify ${NODE} -f vswitch -p${NUMA} ${REQUIRED}"
            fi
        done

        stage_complete "vswitch_cpus" ${NODE}
    done

    return 0
}

## Poll controllerfs until no filesystem reports
## drbd_fs_resizing_in_progress, or FS_RESIZE_TIMEOUT seconds elapse.
function wait_for_drbd_resize {
    DELAY=0

    while [ $DELAY -lt ${FS_RESIZE_TIMEOUT} ]; do
        log_command "system controllerfs-list"
        FS=$(system controllerfs-list ${CLI_NOWRAP} | grep drbd_fs_resizing_in_progress | awk '{print $4}')
        if [ -n "${FS}" ]; then
            log "Waiting for resizing to be done for filesystem $FS"
            sleep 5
            DELAY=$((DELAY + 5))
        else
            # Only three possible states: drbd_fs_resizing_in_progress, available, None
            # When state is "available" or "None", it means resizing is done.
            log "Done resizing controller filesystems"
            return 0
        fi
    done

    info "Timed out waiting for drbd resizing"
    echo -e "system controllerfs-list\n $(system controllerfs-list)"
    echo -e "drbd-overview\n $(drbd-overview)"
    return 1
}

## Poll drbd-overview until no resource reports sync progress, or
## DRBD_SYNC_TIMEOUT seconds elapse.
function wait_for_drbd_sync {
    DELAY=0

    while [ $DELAY -lt ${DRBD_SYNC_TIMEOUT} ]; do
        log_command "drbd-overview"
        STATE=$(drbd-overview | grep sync\'ed | awk '{print $1}')
        # When drbd is syncing the standby node becomes degraded
        if [ "${STATE}" ]; then
            log "Waiting for drbd syncing to be done"
            sleep 5
            DELAY=$((DELAY + 5))
        else
            log "Done drbd sync"
            return 0
        fi
    done

    info "Timed out waiting for drbd sync"
    echo -e "system controllerfs-list\n $(system controllerfs-list)"
    echo -e "drbd-overview\n $(drbd-overview)"
    return 1
}

## Poll host-list until no controller shows "degraded", or
## FS_RESIZE_DEGRADE_TIMEOUT seconds elapse.
function wait_for_controller_degrade_to_clear {
    DELAY=0

    while [ $DELAY -lt ${FS_RESIZE_DEGRADE_TIMEOUT} ]; do
        log_command "system host-list"
        STATE=$(system host-list ${CLI_NOWRAP} | grep controller | grep degraded | awk '{print $2}')
        if [ "${STATE}" ]; then
            log "Waiting for degrade to clear on controllers"
            sleep 5
            DELAY=$((DELAY + 5))
        else
            log "Controllers degraded state cleared"
            return 0
        fi
    done

    info "Timed out waiting for degrade to clear on controllers"
    return 1
}

## Resize following file systems from default to desired size:
## database, image, backup, img-conversions
##
function resize_controller_filesystem {
    info "Resizing controller filesystems ..."
+ + if is_stage_complete "resize_fs"; then + info "Skipping controller file systems resize; already done" + return 0 + fi + + ## For AIO-DX and standard lab, wait until 2nd controller is unlocked/available + ## to do the resizing; otherwise controllerfs-modify will fail + if [ -z "${AVAIL_CONTROLLER_NODES}" -a "${SYSTEM_MODE}" != "simplex" ]; then + info "WARNING:" + info "Following controller file systems, database, image, backup, img-conversions " + info "have default settings. Rerun this command after both controllers are in " + info "unlocked/available state if their original sizes are required." + return 0 + fi + + source ${OPENRC} + local -i DEFAULT_DATABASE_FS_SIZE=$(system controllerfs-show database | grep size | awk '{print $4}') + local -i DEFAULT_IMAGE_FS_SIZE=$(system controllerfs-show glance | grep size | awk '{print $4}') + local -i DEFAULT_BACKUP_FS_SIZE=$(system controllerfs-show backup | grep size | awk '{print $4}') + local -i DEFAULT_IMG_CONVERSIONS_FS_SIZE=$(system controllerfs-show img-conversions | grep size | awk '{print $4}') + + local cmd="system controllerfs-modify " + local args="" + + if [ "${DEFAULT_BACKUP_FS_SIZE}" -ne 0 -a "${DEFAULT_BACKUP_FS_SIZE}" -lt "${BACKUP_FS_SIZE}" ]; then + args+="backup=${BACKUP_FS_SIZE} " + fi + + if [ -n "${DEFAULT_IMG_CONVERSIONS_FS_SIZE}" -a "${DEFAULT_IMG_CONVERSIONS_FS_SIZE}" -lt "${IMG_CONVERSIONS_FS_SIZE}" ]; then + args+="img-conversions=${IMG_CONVERSIONS_FS_SIZE} " + fi + + DRBD=0 + # Resize the drbd file systems if needed + if [ "${DEFAULT_DATABASE_FS_SIZE}" -ne 0 -a "${DEFAULT_DATABASE_FS_SIZE}" -lt "${DATABASE_FS_SIZE}" ]; then + args+="database=${DATABASE_FS_SIZE} " + DRBD=$((DRBD + 1)) + fi + + # Resize the drbd file systems if needed + if [ "${CONFIGURE_STORAGE_LVM}" == "yes" ]; then + if [ "${DEFAULT_IMAGE_FS_SIZE}" -ne 0 -a "${DEFAULT_IMAGE_FS_SIZE}" -lt "${IMAGE_FS_SIZE}" ]; then + args+="glance=${IMAGE_FS_SIZE} " + DRBD=$((DRBD + 1)) + fi + fi + + # No need to resize + if [ -z "${args}" 
]; then
+        log "No need to resize file systems"
+        stage_complete "resize_fs"
+        return 0
+    fi
+
+    log_command "${cmd} ${args}"
+
+    info "Resizing controller filesystems ..."
+
+    if [ "${DRBD}" -ne 0 ]; then
+        # Wait till the drbd resizing is done before proceeding
+        wait_for_drbd_resize
+        RET=$?
+        if [ ${RET} -ne 0 ]; then
+            info "Failed drbd resize for the database filesystem"
+            log_command "system controllerfs-list"
+            log_command "drbd-overview"
+            return 1
+        fi
+
+        # Wait till the drbd sync is done before proceeding
+        wait_for_drbd_sync
+        RET=$?
+        if [ ${RET} -ne 0 ]; then
+            info "Timed-out drbd sync for the database filesystem"
+            log_command "system controllerfs-list"
+            log_command "drbd-overview"
+            return 1
+        fi
+
+        # Wait till the controllers are available before proceeding with the next resize
+        wait_for_controller_degrade_to_clear
+        RET=$?
+        if [ ${RET} -ne 0 ]; then
+            info "Timed-out clearing degraded state for controllers"
+            log_command "system controllerfs-list"
+            log_command "drbd-overview"
+            return 1
+        fi
+    fi
+
+    info "Done resizing controller filesystems"
+    log_command "system controllerfs-list"
+    log_command "drbd-overview"
+
+    stage_complete "resize_fs"
+    return 0
+}
+
+## Setup Additional Ceph Storage Tiers
+##
+function setup_ceph_storage_tiers {
+    if is_stage_complete "ceph_storage_tiers"; then
+        info "Skipping creation of ceph storage tiers; already done"
+        # BUG FIX: was 'continue', which is only valid inside a loop;
+        # the intent (as in every other *_complete guard) is to return.
+        return 0
+    fi
+
+    if [ -z "${STORAGE_TIERS_CEPH}" ]; then
+        stage_complete "ceph_storage_tiers"
+        return 0
+    fi
+
+    info "Adding additional ceph storage tiers"
+
+    local TIER_ARRAY=(${STORAGE_TIERS_CEPH//|/ })
+    for TINDEX in ${!TIER_ARRAY[@]}; do
+        local TIER_UUID=$(system storage-tier-list ceph_cluster ${CLI_NOWRAP} | grep ${TIER_ARRAY[${TINDEX}]} | awk '{print $2}')
+        if [ "${TIER_UUID}" == "" ]; then
+            # Add the storage tier
+            log_command "system storage-tier-add ceph_cluster ${TIER_ARRAY[${TINDEX}]}"
+        else
+            info "Skipping adding storage tier ${TIER_ARRAY[${TINDEX}]}; already exists"
+        fi
+    done
+
+    stage_complete "ceph_storage_tiers"
+
+    return 0
+}
+
+## Setup Additional Ceph Backends
+##
+function setup_ceph_storage_tier_backends {
+    if is_stage_complete "ceph_storage_tier_backends"; then
+        info "Skipping creation of additional ceph storage backends; already done"
+        # BUG FIX: was 'continue', which is only valid inside a loop;
+        # the intent (as in every other *_complete guard) is to return.
+        return 0
+    fi
+
+    if [ -z "${STORAGE_TIERS_CEPH}" ]; then
+        stage_complete "ceph_storage_tier_backends"
+        return 0
+    fi
+
+    info "Adding additional ceph storage tier backends"
+
+    local TIER_ARRAY=(${STORAGE_TIERS_CEPH//|/ })
+    for TINDEX in ${!TIER_ARRAY[@]}; do
+        local TIER_UUID=$(system storage-tier-list ceph_cluster ${CLI_NOWRAP} | grep ${TIER_ARRAY[${TINDEX}]} | awk '{print $2}')
+        if [ "${TIER_UUID}" == "" ]; then
+            echo "Error adding storage backend for ${TIER_ARRAY[${TINDEX}]}; tier is missing"
+            return 1
+        else
+            local BACKEND_UUID=$(system storage-backend-list ${CLI_NOWRAP} | grep "${TIER_ARRAY[${TINDEX}]}-store" | awk '{print $2}')
+            if [ "${BACKEND_UUID}" == "" ]; then
+                log_command "system storage-backend-add --services cinder --name ${TIER_ARRAY[${TINDEX}]}-store -t $TIER_UUID ceph"
+
+                # Wait for the backend to be applied
+                wait_for_backend_configuration ceph "${TIER_ARRAY[${TINDEX}]}-store"
+                RET=$?
+ if [ ${RET} -ne 0 ]; then + exit ${RET} + fi + else + info "Skipping adding storage backend for ${TIER_ARRAY[${TINDEX}]}; already exists" + fi + fi + done + + stage_complete "ceph_storage_tier_backends" + + return 0 +} + +## Setup Journal devices for storage nodes +## +function setup_journal_storage { + for NODE in ${STORAGE_NODES}; do + if is_stage_complete "journals" ${NODE}; then + info "Skipping creation of journal stors for ${NODE}; already done" + continue + fi + + local JOURNAL_DEVICES=$(get_node_variable ${NODE} JOURNAL_DEVICES) + if [ -z "${JOURNAL_DEVICES}" ]; then + stage_complete "journals" ${NODE} + continue + fi + + info "Adding journal stors on ${NODE}" + + DISKS=$(system host-disk-list ${NODE} ${CLI_NOWRAP}) + STORS=$(system host-stor-list ${NODE} ${CLI_NOWRAP}) + + for DEVNAME in ${JOURNAL_DEVICES}; do + DISKID=$(echo "$DISKS" | grep ${DEVNAME} | awk '{print $2}') + if [ -z "${DISKID}" ]; then + echo "No device named ${DEVNAME} on ${NODE}" + return 1 + fi + + STORID=$(echo "${STORS}" | grep ${DISKID} | awk '{print $2}') + if [ -z "${STORID}" ]; then + log_command "system host-stor-add ${NODE} journal ${DISKID}" + fi + done + + stage_complete "journals" ${NODE} + done + + return 0 +} + +## Setup OSD storage for storage nodes +## +function setup_osd_storage { + ##OSD can be only added to storage nodes when controller-1 is available + if [ -z "${AVAIL_CONTROLLER_NODES}" ]; then + local ALL_NODES=$(system host-list ${CLI_NOWRAP} | awk '{if ($6=="controller" && ($12 != "offline")) print $4;}') + else + local ALL_NODES=$(system host-list ${CLI_NOWRAP} | awk '{if (($6=="controller" || $6=="storage") && ($12 != "offline")) print $4;}') + fi + + for NODE in ${ALL_NODES}; do + if is_stage_complete "osd" ${NODE}; then + info "Skipping addition of OSDs on ${NODE}" + continue + fi + + if [[ "${SMALL_SYSTEM}" != "yes" && $NODE == *"controller"* ]]; then + continue + fi + + local OSD_DEVICES=$(get_node_variable ${NODE} OSD_DEVICES) + if [ -z "${OSD_DEVICES}" ]; 
then
+            stage_complete "osd" ${NODE}
+            continue
+        fi
+
+        info "Adding OSD volumes on ${NODE}"
+
+        # Validate config: entries are device|tier[|journal_size[|journal_device]]
+        for DEV_CFG_STR in ${OSD_DEVICES}; do
+            local DEV_CFG_ARRAY=(${DEV_CFG_STR//|/ })
+            if [ ${#DEV_CFG_ARRAY[@]} -gt 4 ]; then
+                echo "Journal settings for host ${NODE} has an invalid format: Incorrect number of fields"
+                echo "  ${DEV_CFG_STR}"
+                return 2
+            fi
+        done
+
+        TIERS=$(system storage-tier-list ceph_cluster ${CLI_NOWRAP})
+        DISKS=$(system host-disk-list ${NODE} ${CLI_NOWRAP})
+        STORS=$(system host-stor-list ${NODE} ${CLI_NOWRAP})
+
+        for DEV_CFG_STR in ${OSD_DEVICES}; do
+            local DEV_CFG_ARRAY=(${DEV_CFG_STR//|/ })
+
+            DISKID=$(echo "${DISKS}" | grep ${DEV_CFG_ARRAY[0]} | awk '{print $2}')
+            if [ -z "${DISKID}" ]; then
+                echo "No device named ${DEV_CFG_ARRAY[0]} on ${NODE}"
+                return 1
+            fi
+
+            TIERID=$(echo "${TIERS}" | grep ${DEV_CFG_ARRAY[1]} | awk '{print $2}')
+            if [ -z "${TIERID}" ]; then
+                echo "No storage tier named ${DEV_CFG_ARRAY[1]} present in the cluster"
+                return 1
+            fi
+
+            STORID=$(echo "${STORS}" | grep ${DISKID} | awk '{print $2}')
+            if [ -z "${STORID}" ]; then
+                CMD="system host-stor-add ${NODE} osd ${DISKID} --tier-uuid ${TIERID}"
+                if [ ${#DEV_CFG_ARRAY[@]} -gt 2 ]; then
+                    # BUG FIX: journal size is field [2]; field [1] is the tier name
+                    CMD="$CMD --journal-size ${DEV_CFG_ARRAY[2]}"
+                fi
+                if [ ${#DEV_CFG_ARRAY[@]} -gt 3 ]; then
+                    # BUG FIX: journal device is field [3]; field [2] is the journal size
+                    JOURNAL_DISK=$(echo "${DISKS}" | grep ${DEV_CFG_ARRAY[3]} | awk '{print $2}')
+                    JOURNAL_STOR=$(echo "${STORS}" | grep ${JOURNAL_DISK} | awk '{print $2}')
+                    CMD="${CMD} --journal-location ${JOURNAL_STOR}"
+                fi
+                log_command "${CMD}"
+            fi
+        done
+
+        stage_complete "osd" ${NODE}
+    done
+
+    return 0
+}
+
+
+## Setup profiles and apply them. This step exists purely to test
+## this functionality to make sure that it is tested as frequently as
+## possible. Applying a profile to the same node that was used to generate it
+## should produce the exact same configuration.
+## +function add_system_profiles { + for NODE in ${NODES} ${STORAGE_NODES}; do + + if is_stage_complete "profiles" ${NODE}; then + info "Skipping system profile creation for ${NODE}; already done" + continue + fi + + info "Adding and applying system profiles for ${NODE}" + + PROFILE="ifprofile-${NODE}" + ID=$(get_ifprofile_id ${PROFILE}) + if [ -z "${ID}" ]; then + log_command "system ifprofile-add ${PROFILE} ${NODE}" + log_command "system host-apply-ifprofile ${NODE} ${PROFILE}" + fi + + if [[ "${NODE}" == *"worker"* ]]; then + PROFILE="cpuprofile-${NODE}" + ID=$(get_cpuprofile_id ${PROFILE}) + if [ -z "${ID}" ]; then + log_command "system cpuprofile-add ${PROFILE} ${NODE}" + log_command "system host-apply-cpuprofile ${NODE} ${PROFILE}" + fi + + elif [[ "${NODE}" == *"storage"* ]]; then + PROFILE="storprofile-${NODE}" + ID=$(get_storprofile_id ${PROFILE}) + if [ -z "${ID}" ]; then + log_command "system storprofile-add ${PROFILE} ${NODE}" + ## We only support applying profiles to a newly installed board + ##log_command "system host-apply-storprofile ${NODE} ${PROFILE}" + fi + fi + + stage_complete "profiles" ${NODE} + done + + return 0 +} + + +## Setup common shared networking resources +## +function setup_internal_networks { + local TRANSPARENT_ARGS="" + local TRANSPARENT_INTERNAL_ARGS="" + local DHCPARGS="" + local VLANARGS="" + local PORT_SECURITY_ARGS="" + local QOS_ARGS="" + local VLANID=0 + local LIMIT=0 + local COUNT=0 + local NET=0 + local ID="" + + if is_stage_complete "internal_networks"; then + info "Skipping shared internal networks configuration; already done" + return 0 + fi + + if [ "$K8S_ENABLED" == "yes" ]; then + unset OS_AUTH_URL + export OS_AUTH_URL=${K8S_URL} + fi + + info "Adding shared internal networks" + + if [ "x${INTERNALNET_DHCP}" != "xyes" ]; then + DHCPARGS="--no-dhcp" + fi + + if [ "${NEUTRON_PORT_SECURITY}" == "True" ]; then + PORT_SECURITY_ARGS="--enable-port-security" + fi + + if [ "${NEUTRON_PORT_SECURITY}" == "False" ]; then + 
PORT_SECURITY_ARGS="--disable-port-security" + fi + + if [ "${VSWITCH_TYPE}" == "avs" ]; then + QOS_ARGS="--wrs-tm:qos $(get_qos_id ${EXTERNALQOS})" + fi + + ## Setup the shared external network + EXTERNAL_PROVIDER=$(get_external_provider_network) + ID=$(get_network_id ${EXTERNALNET}) + if [ -z "${ID}" ]; then + log_command "openstack ${REGION_OPTION} network create --project ${ADMINID} ${EXTERNAL_PROVIDER} --share --external ${EXTERNALNET} ${QOS_ARGS} ${PORT_SECURITY_ARGS}" + fi + + ID=$(get_subnet_id ${EXTERNALSUBNET}) + if [ -z "${ID}" ]; then + log_command "openstack ${REGION_OPTION} subnet create --project ${ADMINID} ${EXTERNALSUBNET} --gateway ${EXTERNALGWIP} --no-dhcp --network ${EXTERNALNET} --subnet-range ${EXTERNALCIDR} --ip-version ${EXTIPVERSION}" + fi + + if [ "${SHARED_TENANT_NETWORKS}" == "yes" -o "${EXTRA_NICS}" != "yes" ]; then + ## Internal networks are not required since VM instances will be + ## directly connected to tenant data networks see comment describing + ## the use of this variable. 
+ stage_complete "internal_networks" + return 0 + fi + + LIMIT=$((NETCOUNT - 1)) + if [ "x${REUSE_NETWORKS}" == "xyes" ]; then + ## Only create a single internal network + LIMIT=1 + fi + + if [ "${FIRSTVLANID}" -ne "0" ]; then + INTERNALNETNAME=${INTERNALNET}${NET} + INTERNALSUBNETNAME=${INTERNALSUBNET}${NET} + INTERNALSUBNETNAME=${INTERNALSUBNET}${NET}-${VLANID} + + INTERNALNETID=$(get_network_id ${INTERNALNETNAME}) + if [ -z "${INTERNALNETID}" ]; then + INTERNAL_PROVIDER=$(get_internal_provider_network 0) + log_command "openstack ${REGION_OPTION} network create --project ${ADMINID} ${INTERNAL_PROVIDER} --share ${INTERNALNETNAME} ${QOS_ARGS} ${TRANSPARENT_INTERNAL_ARGS}" + fi + fi + + ## Setup the shared internal network(s) + local QOS_ARGS="" + if [ "${VSWITCH_TYPE}" == "avs" ]; then + QOS_ARGS="--wrs-tm:qos $(get_qos_id ${INTERNALQOS})" + fi + for I in $(seq 0 ${LIMIT}); do + NET=$((I / ${MAXVLANS})) + VLANID=$(((I % ${MAXVLANS}) + ${FIRSTVLANID})) + + INTERNALNETNAME=${INTERNALNET}${NET} + INTERNALSUBNETNAME=${INTERNALSUBNET}${NET} + if [ ${VLANID} -ne 0 ]; then + INTERNALNETNAME=${INTERNALNET}${NET}-${VLANID} + INTERNALSUBNETNAME=${INTERNALSUBNET}${NET}-${VLANID} + fi + + INTERNALNETID=$(get_network_id ${INTERNALNETNAME}) + if [ -z "${INTERNALNETID}" ]; then + INTERNAL_PROVIDER=$(get_internal_provider_network ${I}) + log_command "openstack ${REGION_OPTION} network create --project ${ADMINID} ${INTERNAL_PROVIDER} --share ${INTERNALNETNAME} ${QOS_ARGS} ${TRANSPARENT_INTERNAL_ARGS}" + fi + + SUBNET=10.${I}.${VLANID} + SUBNETCIDR=${SUBNET}.0/24 + + # The VM user data is setup to statically assign addresses to each VM + # instance so we need to make sure that any dynamic addresses (i.e., + # DHCP port addresses, Router addresses) are not in conflict with any + # addresses that are chosen by this script. WARNING: the IP addresses + # set in the user data will not correspond to the IP addresses selected + # by neutron. 
This used to be the case but in Newton we can no longer + # set the fixed_ip when booting on the internal network because it is + # shared and owned by the admin. We would need to create the port + # ahead of time, let the system pick an address, and then set the user + # data accordingly. That's too complicated to do for what we need. + POOLARGS="--allocation-pool start=${SUBNET}.128,end=${SUBNET}.254" + + ID=$(get_subnet_id ${INTERNALSUBNETNAME}) + if [ -z "${ID}" ]; then + log_command "openstack ${REGION_OPTION} subnet create --project ${ADMINID} ${INTERNALSUBNETNAME} ${DHCPARGS} ${POOLARGS} --gateway none --network ${INTERNALNETNAME} --subnet-range ${SUBNETCIDR}" + fi + COUNT=$((COUNT + 1)) + done + + stage_complete "internal_networks" + + return 0 +} + + +## Setup a management router +## +function setup_management_router { + local TENANT=$1 + local NAME=$2 + local EXTERNAL_NETWORK=$3 + local DVR_ENABLED=$4 + local EXTERNAL_SNAT=$5 + local TENANTNUM=$6 + + ID=$(get_router_id ${NAME}) + if [ -z "${ID}" ]; then + log_command "openstack ${REGION_OPTION} router create ${NAME}" + fi + + if [ "x${DVR_ENABLED}" == "xyes" ]; then + ## Switch to admin context and update the router to be distributed + source ${OPENRC} + if [ "$K8S_ENABLED" == "yes" ]; then + unset OS_AUTH_URL + export OS_AUTH_URL=${K8S_URL} + fi + + log_command "openstack ${REGION_OPTION} router set ${NAME} --disable" + log_command "openstack ${REGION_OPTION} router set ${NAME} --distributed" + fi + + source ${OPENRC} + if [ "$K8S_ENABLED" == "yes" ]; then + unset OS_AUTH_URL + export OS_AUTH_URL=${K8S_URL} + fi + # Neutron will allocate ip addresses non-sequentially so in + # order for us to get predictable ip addresses for our router gateway + # interfaces we must set them ourselves. We set it separately because + # the CLI does not support setting a fixed ip when updating the + # external_gateway_info dict. 
+ log_command "openstack ${REGION_OPTION} router set ${NAME} --external-gateway ${EXTERNAL_NETWORK} --fixed-ip ip-address=$(ip_incr ${EXTERNALGWIP} $((TENANTNUM+1)))" + + if [ "x${EXTERNAL_SNAT}" == "xno" ]; then + ## Switch to admin context and update the router to disable SNAT + log_command "openstack ${REGION_OPTION} router set ${NAME} --external-gateway ${EXTERNAL_NETWORK} --disable-snat" + fi + + ## Switch back to tenant context + source ${HOME}/openrc.${TENANT} + + return 0 +} + +## Setup a single management subnet +## +function setup_management_subnet { + local TENANT=$1 + local NETWORK=$2 + local CIDR=$3 + local NAME=$4 + local ROUTER=$5 + local POOL=$6 + local PROVIDERARGS="" + local DNSARGS="" + local POOLARGS="" + local IPARGS="" + local OWNERARGS="" + local ELEVATE_CREDENTIALS="" + local RESTORE_CREDENTIALS="" + + ID=$(get_subnet_id ${NAME}) + if [ ! -z "${ID}" ]; then + ## already exists + return 0 + fi + + for NAMESERVER in ${DNSNAMESERVERS}; do + DNSARGS="${DNSARGS} --dns-nameserver ${NAMESERVER}" + done + + if [ ! -z "${POOL}" -a "${POOL}" != "none" ]; then + POOLARRAY=(${POOL//-/ }) + POOLARGS="--allocation-pool start=${POOLARRAY[0]},end=${POOLARRAY[1]}" + fi + + IPFIELDS=(${CIDR//\// }) + set +e + is_ipv4 ${IPFIELDS[0]} + IS_IPV4=$? 
+ set -e + if [ ${IS_IPV4} -eq 1 ]; then + IPARGS="--ip-version=4" + else + IPARGS="--ip-version=6 --ipv6-address-mode=${MGMTIPV6ADDRMODE} --ipv6-ra-mode=${MGMTIPV6ADDRMODE}" + fi + + ${ELEVATE_CREDENTIALS} + + if [ "${OS_USERNAME}" == "admin" ]; then + OWNERARGS="--project=$(get_tenant_id ${TENANT})" + fi + + log_command "openstack ${REGION_OPTION} subnet create --network ${NETWORK} --subnet-range ${CIDR} ${NAME} ${OWNERARGS} ${DNSARGS} ${POOLARGS} ${IPARGS}" + + ${RESTORE_CREDENTIALS} + + log_command "openstack ${REGION_OPTION} router add subnet ${ROUTER} ${NAME}" + + return 0 +} + +## Setup management networks +## +function setup_management_networks { + local TENANTNUM=0 + local TENANT="" + local QOS_ARGS="" + local ID="" + + if is_stage_complete "management_networks"; then + info "Skipping management networks configuration; already done" + return 0 + fi + + info "Adding tenant management networks" + + for TENANT in ${TENANTS[@]}; do + local MGMTQOS="${TENANT}-mgmt-qos" + local MGMTROUTER="${TENANT}-router" + source ${HOME}/openrc.${TENANT} + + local EXTERNALNETID=$(get_network_id ${EXTERNALNET}) + if [ -z "${EXTERNALNETID}" ]; then + echo "Unable to get external network ${EXTERNALNET} for tenant ${TENANT}" + return 1 + fi + + if [ "${VSWITCH_TYPE}" == "avs" ]; then + local QOSID=$(get_qos_id ${MGMTQOS}) + if [ -z "${QOSID}" ]; then + echo "Unable to find QoS resource for ${MGMTQOS} for tenant ${TENANT}" + return 1 + else + QOS_ARGS="--wrs-tm:qos ${QOSID}" + fi + fi + + for I in $(seq 0 $((MGMTNETS-1))); do + local MGMTNET=$(get_mgmt_network_name ${TENANT}-mgmt-net ${I}) + local ID=$(get_network_id ${MGMTNET}) + if [ -z "${ID}" ]; then + log_command "openstack ${REGION_OPTION} network create ${MGMTNET} ${QOS_ARGS}" + fi + done + + setup_management_router ${TENANT} ${MGMTROUTER} ${EXTERNALNETID} ${MGMTDVR[${TENANTNUM}]} ${EXTERNALSNAT} ${TENANTNUM} + RET=$? 
+ if [ ${RET} -ne 0 ]; then + echo "Unable to setup mgmt router ${MGMTROUTER} for ${TENANT}" + return ${RET} + fi + + SUBNETS=${MGMTSUBNETS[${TENANTNUM}]} + if [ -z "${SUBNETS}" ]; then + echo "Unable to find any defined subnets for ${TENANT}" + return 1 + fi + + TMPSUBNETS=(${SUBNETS//,/ }) + SUBNETS_PER_NETWORK=$((${#TMPSUBNETS[@]} / ${MGMTNETS})) + REMAINDER=$((${#TMPSUBNETS[@]} % ${MGMTNETS})) + if [ ${REMAINDER} -ne 0 ]; then + echo "Number of subnets in SUBNETS must be a multiple of MGMTNETS=${MGMTNETS}" + return 1 + fi + + COUNT=0 + DATA=(${SUBNETS//,/ }) + for SUBNET in ${DATA[@]}; do + ARRAY=(${SUBNET//|/ }) + CIDR=${ARRAY[0]} + set +u + POOL=${ARRAY[1]} + set -u + + ## Distribute the subnets evenly across the number of mgmt networks + NETWORK=$(((COUNT / ${SUBNETS_PER_NETWORK}) % ${MGMTNETS})) + local MGMTNET=$(get_mgmt_network_name ${TENANT}-mgmt-net ${NETWORK}) + local MGMTSUBNET="${TENANT}-mgmt${NETWORK}-subnet${COUNT}" + setup_management_subnet ${TENANT} ${MGMTNET} ${CIDR} ${MGMTSUBNET} ${MGMTROUTER} "${POOL}" + RET=$? 
+ if [ ${RET} -ne 0 ]; then + echo "Unable to setup mgmt subnet ${MGMTNET} ${SUBNET} for tenant ${TENANT}" + return ${RET} + fi + + COUNT=$((COUNT+1)) + done + + TENANTNUM=$((TENANTNUM + 1)) + done + + stage_complete "management_networks" + + return 0 +} + + +DEDICATED_CPUS="hw:cpu_policy=dedicated" +SHARED_CPUS="hw:cpu_policy=shared" +DPDK_CPU="hw:cpu_model=${DPDK_VCPUMODEL}" + +if [ ${SHARED_PCPU} -ne 0 ]; then + SHARED_VCPU="hw:wrs:shared_vcpu=0" +else + SHARED_VCPU="" +fi + +if [ ${LOW_LATENCY} == "yes" ]; then + CPU_REALTIME="hw:cpu_realtime=yes" + CPU_REALTIME_MASK="hw:cpu_realtime_mask=^0" +else + CPU_REALTIME="" + CPU_REALTIME_MASK="" +fi + +## FIXME: these are not ported yet +##HEARTBEAT_ENABLED="guest-heartbeat=true" +HEARTBEAT_ENABLED="" + +function flavor_create { + local NAME=$1 + local ID=$2 + local MEM=$3 + local DISK=$4 + local CPU=$5 + + shift 5 + local USER_ARGS=$* + local DEFAULT_ARGS="hw:mem_page_size=2048" + + local X=$(get_flavor_id ${NAME}) + + if [ "$K8S_ENABLED" == "yes" ]; then + unset OS_AUTH_URL + export OS_AUTH_URL=${K8S_URL} + fi + + if [ -z "${X}" ]; then + log_command "nova ${REGION_OPTION} flavor-create ${NAME} ${ID} ${MEM} ${DISK} ${CPU}" + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to create flavor: ${NAME}" + exit ${RET} + fi + + log_command "nova ${REGION_OPTION} flavor-key ${NAME} set ${USER_ARGS} ${DEFAULT_ARGS}" + RET=$? 
+ if [ ${RET} -ne 0 ]; then + echo "Failed to set ${NAME} extra specs: ${USER_ARGS} ${DEFAULT_ARGS}" + exit ${RET} + fi + fi + + return 0 +} + +## Setup flavors +## +function setup_all_flavors { + local FLAVORS="" + local FLAVOR="" + local HB_FLAVOR_SUFFIX=".hb" + local HB_FLAVOR_ARGS="${DEDICATED_CPUS} ${HEARTBEAT_ENABLED}" + local DPDK_FLAVOR_SUFFIX=".dpdk" + local DPDK_FLAVOR_ARGS="${DEDICATED_CPUS} ${SHARED_VCPU} ${DPDK_CPU}" + + local ID="" + + ## Create custom flavors (pinned on any numa node) + FLAVOR="small" + flavor_create ${FLAVOR} auto 512 ${IMAGE_SIZE} 1 ${DEDICATED_CPUS} + flavor_create ${FLAVOR}${HB_FLAVOR_SUFFIX} auto 512 ${IMAGE_SIZE} 1 ${HB_FLAVOR_ARGS} + + FLAVOR="medium" + flavor_create ${FLAVOR} auto 1024 ${IMAGE_SIZE} 2 ${DEDICATED_CPUS} + flavor_create ${FLAVOR}${HB_FLAVOR_SUFFIX} auto 1024 ${IMAGE_SIZE} 2 ${HB_FLAVOR_ARGS} + flavor_create ${FLAVOR}${DPDK_FLAVOR_SUFFIX} auto 1024 ${IMAGE_SIZE} 2 ${DPDK_FLAVOR_ARGS} + + FLAVOR="large" + flavor_create ${FLAVOR} auto 2048 ${IMAGE_SIZE} 3 ${DEDICATED_CPUS} + flavor_create ${FLAVOR}${HB_FLAVOR_SUFFIX} auto 2048 ${IMAGE_SIZE} 3 ${HB_FLAVOR_ARGS} + flavor_create ${FLAVOR}${DPDK_FLAVOR_SUFFIX} auto 2048 ${IMAGE_SIZE} 3 ${DPDK_FLAVOR_ARGS} + + FLAVOR="xlarge" + flavor_create ${FLAVOR} auto 4096 ${IMAGE_SIZE} 3 ${DEDICATED_CPUS} + flavor_create ${FLAVOR}${HB_FLAVOR_SUFFIX} auto 4096 ${IMAGE_SIZE} 3 ${HB_FLAVOR_ARGS} + flavor_create ${FLAVOR}${DPDK_FLAVOR_SUFFIX} auto 4096 ${IMAGE_SIZE} 3 ${DPDK_FLAVOR_ARGS} + + ## Create custom flavors (not pinned to any cpu) + FLAVOR="small.float" + flavor_create ${FLAVOR} auto 512 ${IMAGE_SIZE} 1 ${SHARED_CPUS} + flavor_create ${FLAVOR}${HB_FLAVOR_SUFFIX} auto 512 ${IMAGE_SIZE} 1 ${SHARED_CPUS} ${HEARTBEAT_ENABLED} + + FLAVOR="medium.float" + flavor_create ${FLAVOR} auto 1024 ${IMAGE_SIZE} 2 ${SHARED_CPUS} + flavor_create ${FLAVOR}${HB_FLAVOR_SUFFIX} auto 1024 ${IMAGE_SIZE} 2 ${SHARED_CPUS} ${HEARTBEAT_ENABLED} + + FLAVOR="large.float" + flavor_create ${FLAVOR} auto 
2048 ${IMAGE_SIZE} 3 ${SHARED_CPUS} + flavor_create ${FLAVOR}${HB_FLAVOR_SUFFIX} auto 2048 ${IMAGE_SIZE} 3 ${SHARED_CPUS} ${HEARTBEAT_ENABLED} + + FLAVOR="xlarge.float" + flavor_create ${FLAVOR} auto 4096 ${IMAGE_SIZE} 3 ${SHARED_CPUS} + flavor_create ${FLAVOR}${HB_FLAVOR_SUFFIX} auto 4096 ${IMAGE_SIZE} 3 ${SHARED_CPUS} ${HEARTBEAT_ENABLED} + + for NODE in $(seq 0 $((NUMA_NODE_COUNT-1))); do + ## Create custom flavors (pinned on numa node) + if [ ${NODE} -eq 0 ]; then + DPDK_FLAVOR_ARGS="${DEDICATED_CPUS} ${SHARED_VCPU} ${DPDK_CPU}" + else + # A VM cannot span Numa nodes so disable the shared vcpu + DPDK_FLAVOR_ARGS="${DEDICATED_CPUS} ${DPDK_CPU}" + fi + PROCESSOR_ARGS="hw:numa_nodes=1 hw:numa_node.0=${NODE}" + + flavor_create "small.node${NODE}" auto 512 ${IMAGE_SIZE} 1 ${DEDICATED_CPUS} ${PROCESSOR_ARGS} + flavor_create "medium.node${NODE}" auto 1024 ${IMAGE_SIZE} 2 ${DEDICATED_CPUS} ${PROCESSOR_ARGS} + flavor_create "large.node${NODE}" auto 2048 ${IMAGE_SIZE} 3 ${DEDICATED_CPUS} ${PROCESSOR_ARGS} + flavor_create "xlarge.node${NODE}" auto 4096 ${IMAGE_SIZE} 3 ${DEDICATED_CPUS} ${PROCESSOR_ARGS} + flavor_create "medium.dpdk.node${NODE}" auto 1024 ${IMAGE_SIZE} 2 ${DPDK_FLAVOR_ARGS} ${PROCESSOR_ARGS} + flavor_create "large.dpdk.node${NODE}" auto 2048 ${IMAGE_SIZE} 3 ${DPDK_FLAVOR_ARGS} ${PROCESSOR_ARGS} + flavor_create "xlarge.dpdk.node${NODE}" auto 4096 ${IMAGE_SIZE} 3 ${DPDK_FLAVOR_ARGS} ${PROCESSOR_ARGS} + done + + return 0 +} + + +function setup_minimal_flavors { + local DPDK_FLAVOR_ARGS="${DEDICATED_CPUS} ${SHARED_VCPU} ${DPDK_CPU}" + + flavor_create "small" auto 512 ${IMAGE_SIZE} 1 ${DEDICATED_CPUS} + flavor_create "medium.dpdk" auto 1024 ${IMAGE_SIZE} 2 ${DPDK_FLAVOR_ARGS} + flavor_create "small.float" auto 512 ${IMAGE_SIZE} 1 ${SHARED_CPUS} + + return 0 +} + +function create_custom_flavors { + local FLAVORLIST=(${FLAVORS}) + local CUSTOM_NAME="" + local CUSTOM_ID="auto" + local CUSTOM_CORES=0 + local CUSTOM_MEM=0 + local CUSTOM_DISK=0 + local 
CUSTOM_DEDICATED=""
+    local CUSTOM_HEARTBEAT=""
+    local CUSTOM_NUMA0=""
+    local CUSTOM_NUMA1=""
+    local CUSTOM_NUMA_NODES=""
+    local CUSTOM_VCPUMODEL=""
+    local CUSTOM_SHARED_CPUS=""
+    local CUSTOM_NOVA_STORAGE=""
+
+    if is_stage_complete "custom_flavors"; then
+        info "Skipping custom flavors; already done"
+        return 0
+    fi
+
+    info "Adding custom flavors"
+
+    for INDEX in ${!FLAVORLIST[@]}; do
+        ENTRY=${FLAVORLIST[${INDEX}]}
+        DATA=(${ENTRY//|/ })
+        CUSTOM_NAME=${DATA[0]}
+        log " Adding flavor ${ENTRY}"
+
+        ## Remove the flavor name from the array
+        unset DATA[0]
+        DATA=(${DATA[@]})
+
+        FLAVOR_NAME=""
+        CUSTOM_CORES=1
+        CUSTOM_ID="auto"
+        CUSTOM_MEM=1024
+        CUSTOM_DISK=${IMAGE_SIZE}
+        CUSTOM_DEDICATED=""
+        CUSTOM_HEARTBEAT=""
+        CUSTOM_NUMA0=""
+        CUSTOM_NUMA1=""
+        CUSTOM_NUMA_NODES=""
+        CUSTOM_VCPUMODEL=""
+        CUSTOM_SHARED_CPUS=""
+        CUSTOM_NOVA_STORAGE=""
+
+        # Parse key=value settings from the flavor entry
+        for KEYVALUE in ${DATA[@]}; do
+            KEYVAL=(${KEYVALUE//=/ })
+            if [ "x${KEYVAL[0]}" == "xdisk" ]; then
+                CUSTOM_DISK=${KEYVAL[1]}
+            elif [ "x${KEYVAL[0]}" == "xcores" ]; then
+                CUSTOM_CORES=${KEYVAL[1]}
+            elif [ "x${KEYVAL[0]}" == "xmem" ]; then
+                CUSTOM_MEM=${KEYVAL[1]}
+            elif [ "x${KEYVAL[0]}" == "xdedicated" ]; then
+                CUSTOM_DEDICATED="hw:cpu_policy=dedicated"
+            elif [ "x${KEYVAL[0]}" == "xheartbeat" ]; then
+                CUSTOM_HEARTBEAT="sw:wrs:guest:heartbeat=True"
+            elif [ "x${KEYVAL[0]}" == "xnuma_node.0" ]; then
+                CUSTOM_NUMA0="hw:numa_node.0=${KEYVAL[1]}"
+            elif [ "x${KEYVAL[0]}" == "xnuma_node.1" ]; then
+                # BUG FIX: was emitting hw:numa_node.0, clobbering the
+                # numa_node.0 spec instead of setting numa_node.1
+                CUSTOM_NUMA1="hw:numa_node.1=${KEYVAL[1]}"
+            elif [ "x${KEYVAL[0]}" == "xnuma_nodes" ]; then
+                CUSTOM_NUMA_NODES="hw:numa_nodes=${KEYVAL[1]}"
+            elif [ "x${KEYVAL[0]}" == "xvcpumodel" ]; then
+                CUSTOM_VCPUMODEL="hw:cpu_model=${KEYVAL[1]}"
+            elif [ "x${KEYVAL[0]}" == "xsharedcpus" ]; then
+                CUSTOM_SHARED_CPUS="hw:cpu_policy=shared"
+            elif [ "x${KEYVAL[0]}" == "xstorage" ]; then
+                CUSTOM_NOVA_STORAGE="aggregate_instance_extra_specs:storage=${KEYVAL[1]}"
+            fi
+        done
+
+        flavor_create ${CUSTOM_NAME} ${CUSTOM_ID}
${CUSTOM_MEM} ${CUSTOM_DISK} ${CUSTOM_CORES} ${CUSTOM_HEARTBEAT} ${CUSTOM_DEDICATED} ${CUSTOM_VCPUMODEL} ${CUSTOM_SHARED_CPUS} ${CUSTOM_NUMA0} ${CUSTOM_NUMA1} ${CUSTOM_NUMA_NODES} ${CUSTOM_NOVA_STORAGE}
+    done
+
+    stage_complete "custom_flavors"
+
+    return 0
+}
+
+
+## Setup flavors
+##
+function setup_flavors {
+    if is_stage_complete "flavors"; then
+        info "Skipping flavor configuration; already done"
+        return 0
+    fi
+
+    info "Adding ${FLAVOR_TYPES} VM flavors"
+
+    if [ "x${FLAVOR_TYPES}" == "xall" ]; then
+        setup_all_flavors
+        RET=$?
+    elif [ "x${FLAVOR_TYPES}" == "xavs" ]; then
+        setup_avs_flavors; RET=$?  # fixed: status was never captured here, so return ${RET} below was stale/unset
+    else
+        setup_minimal_flavors
+        RET=$?
+    fi
+
+    stage_complete "flavors"
+
+    return ${RET}
+}
+
+## Setup keys
+##
+function setup_keys {
+    local PRIVKEY="${HOME}/.ssh/id_rsa"
+    local PUBKEY="${PRIVKEY}.pub"
+    local KEYNAME=""
+    local TENANT=""
+    local ID=""
+
+    if is_stage_complete "public_keys"; then
+        info "Skipping public key configuration; already done"
+        return 0
+    fi
+
+    info "Adding VM public keys"
+
+    if [ -f ${HOME}/id_rsa.pub ]; then
+        ## Use a user defined public key instead of the local user public key
+        ## if one is found in the home directory
+        PUBKEY="${HOME}/id_rsa.pub"
+    elif [ ! -f ${PRIVKEY} ]; then
+        log "Generating new SSH key pair for ${USER}"
+        ssh-keygen -q -N "" -f ${PRIVKEY}
+        RET=$?
+        if [ ${RET} -ne 0 ]; then
+            echo "Failed to generate SSH key pair"
+            return ${RET}
+        fi
+    fi
+
+    for TENANT in ${TENANTS[@]}; do
+        source ${HOME}/openrc.${TENANT}
+        KEYNAME="keypair-${TENANT}"
+        ID=`nova ${REGION_OPTION} keypair-list | grep -E ${KEYNAME}[^0-9] | awk '{print $2}'`
+        if [ -z "${ID}" ]; then
+            log_command "nova ${REGION_OPTION} keypair-add --pub-key ${PUBKEY} ${KEYNAME}"
+        fi
+    done
+
+    stage_complete "public_keys"
+
+    return 0
+}
+
+## Setup floating IP addresses for each tenant
+##
+function setup_floating_ips {
+    local TENANT=""
+    local COUNT=0
+
+    if [ "${FLOATING_IP}" != "yes" ]; then
+        return 0
+    fi
+
+    if is_stage_complete "floating_ips"; then
+        info "Skipping floating IP address configuration; already done"
+        return 0
+    fi
+
+    info "Adding floating IP addresses"
+
+    for TENANT in ${TENANTS[@]}; do
+        source ${HOME}/openrc.${TENANT}
+        COUNT=$(openstack ${REGION_OPTION} floating ip list | grep -E "[a-zA-Z0-9]{8}-" | wc -l)
+        if [ ${COUNT} -ge ${APPCOUNT} ]; then
+            continue
+        fi
+        for I in $(seq 1 $((APPCOUNT - ${COUNT}))); do
+            log_command "openstack ${REGION_OPTION} floating ip create ${EXTERNALNET}"
+        done
+    done
+
+    stage_complete "floating_ips"
+
+    return 0
+}
+
+## Create networks for Ixia
+##
+function setup_ixia_networks {
+    local TRANSPARENT_ARGS=""
+    local TENANTNUM=0
+    local DHCPARGS="--no-dhcp"
+    local POOLARGS=""
+
+    if is_stage_complete "ixia_networks"; then
+        info "Skipping tenant networks configuration; already done"
+        return 0
+    fi
+
+    if [ "${ROUTED_TENANT_NETWORKS}" == "no" ]; then
+        ## Not required. 
+ return 0 + fi + + if [ "${SHARED_TENANT_NETWORKS}" == "no" ]; then + ## Not compatible + echo "SHARED_TENANT_NETWORKS must be set to \"yes\" for ROUTED_TENANT_NETWORKS" + return 1 + fi + + info "Adding Ixia networks" + + for TENANT in ${TENANTS[@]}; do + source ${HOME}/openrc.${TENANT} + local IXIANET="${TENANT}-ixia-net0" + local IXIASUBNET="${TENANT}-ixia-subnet0" + local IXIAROUTER="${TENANT}-ixia0" + local SUBNET=172.$((16 + ${TENANTNUM} * 2)).0 + local SUBNETCIDR=${SUBNET}.0/24 + local IXIAGWY=${SUBNET}.31 + + if [ "${VLAN_TRANSPARENT}" == "True" ]; then + TRANSPARENT_ARGS="--transparent-vlan" + fi + + ID=$(get_network_id ${IXIANET}) + if [ -z "${ID}" ]; then + log_command "openstack ${REGION_OPTION} network create ${IXIANET}" + fi + + ID=$(get_subnet_id ${IXIASUBNET}) + if [ -z "${ID}" ]; then + log_command "openstack ${REGION_OPTION} subnet create ${IXIASUBNET} ${DHCPARGS} ${POOLARGS} --network ${IXIANET} --subnet-range ${SUBNETCIDR}" + fi + + ID=$(get_router_id ${IXIAROUTER}) + if [ -z "${ID}" ]; then + log_command "openstack ${REGION_OPTION} router create ${IXIAROUTER}" + log_command "openstack ${REGION_OPTION} router add subnet ${IXIAROUTER} ${IXIASUBNET}" + fi + + TENANTNUM=$((TENANTNUM + 1)) + done + + stage_complete "ixia_networks" + + return 0 +} + +## Create networks for tenants +## +function setup_tenant_networks { + local TRANSPARENT_ARGS="" + local OWNERSHIP="" + local PROVIDERARGS="" + local SHAREDARGS="" + local DHCPARGS="" + local PORT_SECURITY_ARGS="" + local TENANT="" + local LIMIT=0 + local ID=0 + local TENANTNUM=0 + + if is_stage_complete "tenant_networks"; then + info "Skipping tenant networks configuration; already done" + return 0 + fi + + info "Adding tenant networks" + + if [ "${EXTRA_NICS}" != "yes" ]; then + stage_complete "tenant_networks" + return 0 + fi + + if [ "x${TENANTNET_DHCP}" != "xyes" ]; then + DHCPARGS="--no-dhcp" + fi + + if [ "${NEUTRON_PORT_SECURITY}" == "True" ]; then + PORT_SECURITY_ARGS="--enable-port-security" + 
fi + + if [ "${NEUTRON_PORT_SECURITY}" == "False" ]; then + PORT_SECURITY_ARGS="--disable-port-security" + fi + + for TENANT in ${TENANTS[@]}; do + source ${HOME}/openrc.${TENANT} + local TENANTNET="${TENANT}-net" + local TENANTSUBNET="${TENANT}-subnet" + + if [ "x${SHARED_TENANT_NETWORKS}" == "xyes" ]; then + source ${OPENRC} + OWNERSHIP="--project $(get_tenant_id ${TENANT})" + SHAREDARGS="--share" + fi + + LIMIT=$((NETCOUNT - 1)) + if [ "x${REUSE_NETWORKS}" == "xyes" ]; then + ## Create only a single network + LIMIT=0 + fi + + for I in $(seq 0 ${LIMIT}); do + GATEWAYARGS="--gateway none" + if [ "${ROUTED_TENANT_NETWORKS}" == "yes" ]; then + SUBNET=172.31.${I} + SUBNETCIDR=${SUBNET}.0/24 + PEERGWY=${SUBNET}.$((1 + (1 - ${TENANTNUM}))) + else + SUBNET=172.$((16 + ${TENANTNUM} * 2)).${I} + SUBNETCIDR=${SUBNET}.0/24 + fi + + # The nova boot commands are setup to statically assign + # addresses to each VM instance so we need to make sure that + # any dynamic addresses (i.e., DHCP port addresses) are not in + # conflict with any addresses that are chosen by this script. 
+ POOLARGS="--allocation-pool start=${SUBNET}.128,end=${SUBNET}.254" + + TENANTNETID=$(get_network_id ${TENANTNET}${I}) + if [ -z "${TENANTNETID}" ]; then + log_command "openstack ${REGION_OPTION} network create ${OWNERSHIP} ${SHAREDARGS} ${TENANTNET}${I} ${PORT_SECURITY_ARGS}" + TENANTNETID=$(get_network_id ${TENANTNET}${I}) + fi + + ID=$(get_subnet_id ${TENANTSUBNET}${I}) + if [ -z "${ID}" ]; then + log_command "openstack ${REGION_OPTION} subnet create ${OWNERSHIP} ${TENANTSUBNET}${I} ${DHCPARGS} ${POOLARGS} ${GATEWAYARGS} --network ${TENANTNET}${I} --subnet-range ${SUBNETCIDR}" + + if [ "${ROUTED_TENANT_NETWORKS}" == "yes" ]; then + log_command "openstack ${REGION_OPTION} router add subnet ${TENANT}-ixia0 ${TENANTSUBNET}${I}" + for J in $(seq 1 ${IXIA_PORT_PAIRS}); do + ROUTES="" + PORT_OFFSET=$(((${J} - 1)*10)) + IXIAGWY=172.$((16 + ${TENANTNUM} * 2)).0.$((31 + ${PORT_OFFSET})) + + BASE=$(((100 * (${TENANTNUM} + 1)) + ${PORT_OFFSET})) + ROUTES="${ROUTES} --route destination=10.$((BASE + ${I})).0.0/24,gateway=${IXIAGWY}" + BASE=$(((100 * ((1 - ${TENANTNUM}) + 1)) + ${PORT_OFFSET})) + ROUTES="${ROUTES} --route destination=10.$((BASE + ${I})).0.0/24,gateway=${PEERGWY}" + log_command "openstack ${REGION_OPTION} router set ${TENANT}-ixia0 ${ROUTES}" + done + fi + fi + done + + TENANTNUM=$((TENANTNUM + 1)) + done + + stage_complete "tenant_networks" + + return 0 +} + +## Create a per-VM userdata file to setup the layer2 bridge test. The VM +## will be setup to bridge traffic between its 2nd and 3rd NIC +## +function create_layer2_userdata { + local VMNAME=$1 + local VMTYPE=$2 + local NETTYPE=$3 + local TENANTNUM=$4 + local NETNUMBER=$5 + local HOSTNUMBER=$6 + local VLANID=$7 + local TENANTMTU=$8 + local INTERNALMTU=$9 + + local USERDATA=${USERDATA_DIR}/${VMNAME}_userdata.txt + + BRIDGE_MTU=$(($TENANTMTU < $INTERNALMTU ? 
$TENANTMTU : $INTERNALMTU)) + + if [ "${NETTYPE}" == "kernel" ]; then + cat << EOF > ${USERDATA} +#wrs-config + +FUNCTIONS="bridge,${EXTRA_FUNCTIONS}" +LOW_LATENCY="${LOW_LATENCY}" +BRIDGE_PORTS="${DEFAULT_IF1},${DEFAULT_IF2}.${VLANID}" +BRIDGE_MTU="${BRIDGE_MTU}" +EOF + sed -i -e "s#\(BRIDGE_PORTS\)=.*#\1=\"${DEFAULT_IF1},${DEFAULT_IF2}.${VLANID}\"#g" ${USERDATA} + sed -i -e "s#\(FUNCTIONS\)=.*#\1=\"bridge\"#g" ${USERDATA} + else + NIC_DEVICE=$(get_guest_nic_device $VMTYPE) + + cat << EOF > ${USERDATA} +#wrs-config + +FUNCTIONS="hugepages,vswitch,${EXTRA_FUNCTIONS}" +LOW_LATENCY="${LOW_LATENCY}" +BRIDGE_PORTS="${DEFAULT_IF0},${DEFAULT_IF1}.${VLANID}" +BRIDGE_MTU="${BRIDGE_MTU}" +NIC_DEVICE="${NIC_DEVICE}" +EOF + if [ ! -z "${VSWITCH_ENGINE_IDLE_DELAY+x}" ]; then + echo "VSWITCH_ENGINE_IDLE_DELAY=${VSWITCH_ENGINE_IDLE_DELAY}" >> ${USERDATA} + fi + if [ ! -z "${VSWITCH_MEM_SIZES+x}" ]; then + echo "VSWITCH_MEM_SIZES=${VSWITCH_MEM_SIZES}" >> ${USERDATA} + fi + if [ ! -z "${VSWITCH_MBUF_POOL_SIZE+x}" ]; then + echo "VSWITCH_MBUF_POOL_SIZE=${VSWITCH_MBUF_POOL_SIZE}" >> ${USERDATA} + fi + if [ ! -z "${VSWITCH_ENGINE_PRIORITY+x}" ]; then + echo "VSWITCH_ENGINE_PRIORITY=${VSWITCH_ENGINE_PRIORITY}" >> ${USERDATA} + fi + fi + + echo ${USERDATA} + return 0 +} + +## Create a per-VM userdata file to setup the layer3 routing test. The VM +## will be setup to route traffic between its 2nd and 3rd NIC according to the +## IP addresses and routes supplied in the ADDRESSES and ROUTES variables. 
+## +## +function create_layer3_userdata { + local VMNAME=$1 + local VMTYPE=$2 + local NETTYPE=$3 + local TENANTNUM=$4 + local NETNUMBER=$5 + local HOSTNUMBER=$6 + local VLANID=$7 + local TENANTMTU=$8 + local INTERNALMTU=$9 + local IFNAME1="${DEFAULT_IF1}" + local IFNAME2="${DEFAULT_IF2}" + + local USERDATA=${USERDATA_DIR}/${VMNAME}_userdata.txt + + if [ "${NETTYPE}" == "kernel" ]; then + local FUNCTIONS="routing,${EXTRA_FUNCTIONS}" + elif [ "${NETTYPE}" == "vswitch" ]; then + local FUNCTIONS="hugepages,avr,${EXTRA_FUNCTIONS}" + IFNAME1="${DEFAULT_IF0}" + IFNAME2="${DEFAULT_IF1}" + else + echo "layer3 user data for type=${NETTYPE} is not supported" + exit 1 + fi + + NIC_DEVICE=$(get_guest_nic_device $VMTYPE) + + if [ "0${VLANID}" -ne 0 ]; then + IFNAME2="${IFNAME2}.${VLANID}" + fi + + ## Setup static routes between IXIA -> VM0 -> VM1 -> IXIA for 4 Ixia static + ## subnets and 4 connected interface subnets. + ## + ## Static traffic will look like the following where the leading prefix (10) + ## will increment by 10 based on the HOSTNUMBER variable to allow different + ## ranges for each VM instance on the network: + ## 10.160.*.* -> 10.180.*.* + ## 10.170.*.* -> 10.190.*.* + ## + ## Connected interface traffic will look like this: + ## 172.16.*.* -> 172.18.*.* + ## 172.19.*.* -> 172.17.*.* + ## + ## The gateway addresses will look like this: + ## 172.16.*.{2,4,6...} -> 172.16.*.{1,3,5...} -> 10.1.*.{1,3,5...} -> + + ## | + ## 172.18.*.{2,4,6...} <- 172.18.*.{1,3,5...} <- 10.1.*.{2,4,6...} <- + + ## + ## + ## + PREFIX=$((10 * (1 + ${HOSTNUMBER}))) + MY_HOSTBYTE=$((1 + (${HOSTNUMBER} * 2))) + IXIA_HOSTBYTE=$((2 + (${HOSTNUMBER} * 2))) + + if [ "${SHARED_TENANT_NETWORKS}" == "yes" ]; then + # directly connected to both networks, therefore setup local addresses + # only and no routes for the internal network. 
+ cat << EOF > ${USERDATA} +#wrs-config + +FUNCTIONS=${FUNCTIONS} +LOW_LATENCY="${LOW_LATENCY}" +NIC_DEVICE=${NIC_DEVICE} +ADDRESSES=( + "172.16.${NETNUMBER}.${MY_HOSTBYTE},255.255.255.0,${IFNAME1},${TENANTMTU}" + "172.18.${NETNUMBER}.${MY_HOSTBYTE},255.255.255.0,${IFNAME2},${TENANTMTU}" + ) +ROUTES=() +EOF + elif [ ${TENANTNUM} -eq 0 ]; then + MY_P2PBYTE=$((1 + (${HOSTNUMBER} * 2))) + PEER_P2PBYTE=$((2 + (${HOSTNUMBER} * 2))) + + cat << EOF > ${USERDATA} +#wrs-config + +FUNCTIONS=${FUNCTIONS} +LOW_LATENCY="${LOW_LATENCY}" +NIC_DEVICE=${NIC_DEVICE} +ADDRESSES=( + "172.16.${NETNUMBER}.${MY_HOSTBYTE},255.255.255.0,${IFNAME1},${TENANTMTU}" + "10.1.${NETNUMBER}.${MY_P2PBYTE},255.255.255.0,${IFNAME2},${INTERNALMTU}" + ) +ROUTES=( + "${PREFIX}.160.${NETNUMBER}.0/24,172.16.${NETNUMBER}.${IXIA_HOSTBYTE},${IFNAME1}" + "${PREFIX}.170.${NETNUMBER}.0/24,172.16.${NETNUMBER}.${IXIA_HOSTBYTE},${IFNAME1}" + "${PREFIX}.180.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}" + "${PREFIX}.190.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}" + "172.18.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}" + "172.19.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}" + ) +EOF + + else + MY_P2PBYTE=$((2 + (${HOSTNUMBER} * 2))) + PEER_P2PBYTE=$((1 + (${HOSTNUMBER} * 2))) + + cat << EOF > ${USERDATA} +#wrs-config + +FUNCTIONS=${FUNCTIONS} +LOW_LATENCY="${LOW_LATENCY}" +NIC_DEVICE=${NIC_DEVICE} +ADDRESSES=( + "172.18.${NETNUMBER}.${MY_HOSTBYTE},255.255.255.0,${IFNAME1},${TENANTMTU}" + "10.1.${NETNUMBER}.${MY_P2PBYTE},255.255.255.0,${IFNAME2},${INTERNALMTU}" + ) +ROUTES=( + "${PREFIX}.180.${NETNUMBER}.0/24,172.18.${NETNUMBER}.${IXIA_HOSTBYTE},${IFNAME1}" + "${PREFIX}.190.${NETNUMBER}.0/24,172.18.${NETNUMBER}.${IXIA_HOSTBYTE},${IFNAME1}" + "${PREFIX}.160.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}" + "${PREFIX}.170.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}" + 
"172.16.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}" + "172.17.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}" + ) +EOF + + fi + + if [ ! -z "${VSWITCH_ENGINE_IDLE_DELAY+x}" ]; then + echo "VSWITCH_ENGINE_IDLE_DELAY=${VSWITCH_ENGINE_IDLE_DELAY}" >> ${USERDATA} + fi + if [ ! -z "${VSWITCH_MEM_SIZES+x}" ]; then + echo "VSWITCH_MEM_SIZES=${VSWITCH_MEM_SIZES}" >> ${USERDATA} + fi + if [ ! -z "${VSWITCH_MBUF_POOL_SIZE+x}" ]; then + echo "VSWITCH_MBUF_POOL_SIZE=${VSWITCH_MBUF_POOL_SIZE}" >> ${USERDATA} + fi + if [ ! -z "${VSWITCH_ENGINE_PRIORITY+x}" ]; then + echo "VSWITCH_ENGINE_PRIORITY=${VSWITCH_ENGINE_PRIORITY}" >> ${USERDATA} + fi + + echo ${USERDATA} + return 0 +} + + +## Create a per-VM userdata file to setup the layer2 bridge test. The VM +## will be setup to bridge traffic between its 2nd and 3rd NIC +## +function create_layer2_centos_userdata { + local VMNAME=$1 + local NETTYPE=$2 + local TENANTNUM=$3 + local NETNUMBER=$4 + local HOSTNUMBER=$5 + local VLANID=$6 + local TENANTMTU=$7 + local INTERNALMTU=$8 + + local USERDATA=${USERDATA_DIR}/${VMNAME}_userdata.txt + + # Initially, just worry about enabling login + # TODO: Add networking at a later date + + cat << EOF > ${USERDATA} +#cloud-config +chpasswd: + list: | + root:root + centos:centos + expire: False +ssh_pwauth: True +EOF + + echo ${USERDATA} + return 0 +} + +## Create a per-VM userdata file to setup the layer3 routing test. The VM +## will be setup to route traffic between its 2nd and 3rd NIC according to the +## IP addresses and routes supplied in the ADDRESSES and ROUTES variables. 
+##
+##
+function create_layer3_centos_userdata {
+    local VMNAME=$1
+    local NETTYPE=$2
+    local TENANTNUM=$3
+    local NETNUMBER=$4
+    local HOSTNUMBER=$5
+    local VLANID=$6
+    local TENANTMTU=$7
+    local INTERNALMTU=$8
+    local IFNAME1="${DEFAULT_IF1}"
+    local IFNAME2="${DEFAULT_IF2}"
+
+    local USERDATA=${USERDATA_DIR}/${VMNAME}_userdata.txt
+
+    # Initially, just worry about enabling login
+    # TODO: Add networking at a later date
+
+    cat << EOF > ${USERDATA}
+#cloud-config
+chpasswd:
+  list: |
+    root:root
+    centos:centos
+  expire: False
+ssh_pwauth: True
+EOF
+
+    echo ${USERDATA}
+    return 0
+}
+
+## Create a per-VM userdata file to setup the networking in the guest
+## according to the NETWORKING_TYPE variable.
+##
+function create_userdata {
+    local VMNAME=$1
+    local VMTYPE=$2
+    local NETTYPE=$3
+    local TENANTNUM=$4
+    local NETNUMBER=$5
+    local HOSTNUMBER=$6
+    local VLANID=$7
+    local TENANTMTU=$8
+    local INTERNALMTU=$9
+    local IMAGE=${10}  # fixed: $10 parses as ${1}0 ("<VMNAME>0"); braces are required past $9
+
+
+    if [ "x${IMAGE}" == "xcentos" -o "x${IMAGE}" == "xcentos_raw" ]; then
+        if [ "x${NETWORKING_TYPE}" == "xlayer3" ]; then
+            create_layer3_centos_userdata ${VMNAME} ${NETTYPE} ${TENANTNUM} ${NETNUMBER} ${HOSTNUMBER} ${VLANID} ${TENANTMTU} ${INTERNALMTU}
+        else
+            create_layer2_centos_userdata ${VMNAME} ${NETTYPE} ${TENANTNUM} ${NETNUMBER} ${HOSTNUMBER} ${VLANID} ${TENANTMTU} ${INTERNALMTU}
+        fi
+    else
+        if [ "x${NETWORKING_TYPE}" == "xlayer3" ]; then
+            create_layer3_userdata ${VMNAME} ${VMTYPE} ${NETTYPE} ${TENANTNUM} ${NETNUMBER} ${HOSTNUMBER} ${VLANID} ${TENANTMTU} ${INTERNALMTU}
+        else
+            create_layer2_userdata ${VMNAME} ${VMTYPE} ${NETTYPE} ${TENANTNUM} ${NETNUMBER} ${HOSTNUMBER} ${VLANID} ${TENANTMTU} ${INTERNALMTU}
+        fi
+    fi
+
+    return $?
+}
+
+## Create a per-VM userdata file to setup the layer2 bridge test. 
The VM
+## will be setup to bridge traffic between its 2nd and 3rd NIC
+##
+function append_layer2_heat_userdata {
+    local VMNAME=$1
+    local VMTYPE=$2
+    local NETTYPE=$3
+    local TENANTNUM=$4
+    local NETNUMBER=$5
+    local HOSTNUMBER=$6
+    local VLANID=$7
+    local TENANTMTU=$8
+    local INTERNALMTU=$9
+    local FILE=${10}  # fixed: $10 parses as ${1}0 ("<VMNAME>0"); braces are required past $9
+
+    BRIDGE_MTU=$(($TENANTMTU < $INTERNALMTU ? $TENANTMTU : $INTERNALMTU))
+
+    if [ "${NETTYPE}" == "kernel" ]; then
+        cat << EOF >> ${FILE}
+      user_data_format: 'RAW'
+      user_data:
+        Fn::Base64:
+          Fn::Replace:
+          - 'OS::stack_name': {Ref: 'OS::stack_name'}
+          - |
+            #wrs-config
+
+            FUNCTIONS="bridge,${EXTRA_FUNCTIONS}"
+            LOW_LATENCY="${LOW_LATENCY}"
+            BRIDGE_PORTS="${DEFAULT_IF1},${DEFAULT_IF2}.${VLANID}"
+            BRIDGE_MTU="${BRIDGE_MTU}"
+EOF
+        sed -i -e "s#\(BRIDGE_PORTS\)=.*#\1=\"${DEFAULT_IF1},${DEFAULT_IF2}.${VLANID}\"#g" ${FILE}
+        sed -i -e "s#\(FUNCTIONS\)=.*#\1=\"bridge\"#g" ${FILE}
+    else
+        NIC_DEVICE=$(get_guest_nic_device $VMTYPE)
+
+        cat << EOF >> ${FILE}
+      user_data_format: 'RAW'
+      user_data:
+        Fn::Base64:
+          Fn::Replace:
+          - 'OS::stack_name': {Ref: 'OS::stack_name'}
+          - |
+            #wrs-config
+
+            FUNCTIONS="hugepages,vswitch,${EXTRA_FUNCTIONS}"
+            LOW_LATENCY="${LOW_LATENCY}"
+            BRIDGE_PORTS="${DEFAULT_IF0},${DEFAULT_IF1}.${VLANID}"
+            BRIDGE_MTU="${BRIDGE_MTU}"
+            NIC_DEVICE="${NIC_DEVICE}"
+EOF
+        if [ ! -z "${VSWITCH_ENGINE_IDLE_DELAY+x}" ]; then
+            echo "            VSWITCH_ENGINE_IDLE_DELAY=${VSWITCH_ENGINE_IDLE_DELAY}" >> ${FILE}
+        fi
+        if [ ! -z "${VSWITCH_MEM_SIZES+x}" ]; then
+            echo "            VSWITCH_MEM_SIZES=${VSWITCH_MEM_SIZES}" >> ${FILE}
+        fi
+        if [ ! -z "${VSWITCH_MBUF_POOL_SIZE+x}" ]; then
+            echo "            VSWITCH_MBUF_POOL_SIZE=${VSWITCH_MBUF_POOL_SIZE}" >> ${FILE}
+        fi
+        if [ ! -z "${VSWITCH_ENGINE_PRIORITY+x}" ]; then
+            echo "            VSWITCH_ENGINE_PRIORITY=${VSWITCH_ENGINE_PRIORITY}" >> ${FILE}
+        fi
+    fi
+
+    return 0
+}
+
+## Create a per-VM userdata file to setup the layer3 routing test. 
The VM
+## will be setup to route traffic between its 2nd and 3rd NIC according to the
+## IP addresses and routes supplied in the ADDRESSES and ROUTES variables.
+##
+##
+function append_layer3_heat_userdata {
+    local VMNAME=$1
+    local VMTYPE=$2
+    local NETTYPE=$3
+    local TENANTNUM=$4
+    local NETNUMBER=$5
+    local HOSTNUMBER=$6
+    local VLANID=$7
+    local TENANTMTU=$8
+    local INTERNALMTU=$9
+    local FILE=${10}  # fixed: $10 parses as ${1}0 ("<VMNAME>0"); braces are required past $9
+    local IFNAME1="${DEFAULT_IF1}"
+    local IFNAME2="${DEFAULT_IF2}"
+
+    if [ "${NETTYPE}" == "kernel" ]; then
+        local FUNCTIONS="routing,${EXTRA_FUNCTIONS}"
+    elif [ "${NETTYPE}" == "vswitch" ]; then
+        local FUNCTIONS="hugepages,avr,${EXTRA_FUNCTIONS}"
+        IFNAME1="${DEFAULT_IF0}"
+        IFNAME2="${DEFAULT_IF1}"
+    else
+        echo "layer3 user data for type=${NETTYPE} is not supported"
+        exit 1
+    fi
+
+    NIC_DEVICE=$(get_guest_nic_device $VMTYPE)
+
+    if [ "0${VLANID}" -ne 0 ]; then
+        IFNAME2="${IFNAME2}.${VLANID}"
+    fi
+
+    ## Setup static routes between IXIA -> VM0 -> VM1 -> IXIA for 4 Ixia static
+    ## subnets and 4 connected interface subnets.
+    ##
+    ## See comment above for detailed explanation
+    ##
+    PREFIX=$((10 * (1 + ${HOSTNUMBER})))
+    MY_HOSTBYTE=$((1 + (${HOSTNUMBER} * 2)))
+    IXIA_HOSTBYTE=$((2 + (${HOSTNUMBER} * 2)))
+
+    if [ ${TENANTNUM} -eq 0 ]; then
+        MY_P2PBYTE=$((1 + (${HOSTNUMBER} * 2)))
+        PEER_P2PBYTE=$((2 + (${HOSTNUMBER} * 2)))
+
+        cat << EOF >> ${FILE}
+      user_data_format: 'RAW'
+      user_data:
+        Fn::Base64:
+          Fn::Replace:
+          - 'OS::stack_name': {Ref: 'OS::stack_name'}
+          - |
+            #wrs-config
+
+            FUNCTIONS=${FUNCTIONS}
+            LOW_LATENCY="${LOW_LATENCY}"
+            NIC_DEVICE=${NIC_DEVICE}
+            ADDRESSES=(
+              "172.16.${NETNUMBER}.$((1 + (${HOSTNUMBER} * 2))),255.255.255.0,${IFNAME1},${TENANTMTU}"
+              "10.1.${NETNUMBER}.$((1 + (${HOSTNUMBER} * 2))),255.255.255.0,${IFNAME2},${INTERNALMTU}"
+            )
+            ROUTES=(
+              "${PREFIX}.160.${NETNUMBER}.0/24,172.16.${NETNUMBER}.${IXIA_HOSTBYTE},${IFNAME1}"
+              "${PREFIX}.170.${NETNUMBER}.0/24,172.16.${NETNUMBER}.${IXIA_HOSTBYTE},${IFNAME1}"
+              "${PREFIX}.180.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}"
+              "${PREFIX}.190.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}"
+              "172.18.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}"
+              "172.19.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}"
+            )
+EOF
+    else
+        MY_P2PBYTE=$((2 + (${HOSTNUMBER} * 2)))
+        PEER_P2PBYTE=$((1 + (${HOSTNUMBER} * 2)))
+
+        cat << EOF >> ${FILE}
+      user_data_format: 'RAW'
+      user_data:
+        Fn::Base64:
+          Fn::Replace:
+          - 'OS::stack_name': {Ref: 'OS::stack_name'}
+          - |
+            #wrs-config
+
+            FUNCTIONS=${FUNCTIONS}
+            LOW_LATENCY="${LOW_LATENCY}"
+            NIC_DEVICE=${NIC_DEVICE}
+            ADDRESSES=(
+              "172.18.${NETNUMBER}.$((1 + (${HOSTNUMBER} * 2))),255.255.255.0,${IFNAME1},${TENANTMTU}"
+              "10.1.${NETNUMBER}.$((2 + (${HOSTNUMBER} * 2))),255.255.255.0,${IFNAME2},${INTERNALMTU}"
+            )
+            ROUTES=(
+              "${PREFIX}.180.${NETNUMBER}.0/24,172.18.${NETNUMBER}.${IXIA_HOSTBYTE},${IFNAME1}"
+              "${PREFIX}.190.${NETNUMBER}.0/24,172.18.${NETNUMBER}.${IXIA_HOSTBYTE},${IFNAME1}"
+              "${PREFIX}.160.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}"
+              "${PREFIX}.170.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}"
+              "172.16.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}"
+              "172.17.${NETNUMBER}.0/24,10.1.${NETNUMBER}.${PEER_P2PBYTE},${IFNAME2}"
+            )
+EOF
+    fi
+
+    return 0
+}
+
+
+## Create a per-VM userdata file to setup the layer2 bridge test. The VM
+## will be setup to bridge traffic between its 2nd and 3rd NIC
+##
+function append_layer2_heat_centos_userdata {
+    local VMNAME=$1
+    local NETTYPE=$2
+    local TENANTNUM=$3
+    local NETNUMBER=$4
+    local HOSTNUMBER=$5
+    local VLANID=$6
+    local TENANTMTU=$7
+    local INTERNALMTU=$8
+    local FILE=$9
+
+    # Initially, just worry about enabling login
+    # TODO: Add networking at a later date
+
+    cat << EOF >> ${FILE}
+      user_data_format: 'RAW'
+      user_data:
+        Fn::Base64:
+          Fn::Replace:
+          - 'OS::stack_name': {Ref: 'OS::stack_name'}
+          - |
+            #cloud-config
+            chpasswd:
+              list: |
+                root:root
+                centos:centos
+              expire: False
+            ssh_pwauth: True
+EOF
+
+    return 0
+}
+
+## Create a per-VM userdata file to setup the layer3 routing test. The VM
+## will be setup to route traffic between its 2nd and 3rd NIC according to the
+## IP addresses and routes supplied in the ADDRESSES and ROUTES variables.
+##
+##
+function append_layer3_heat_centos_userdata {
+    local VMNAME=$1
+    local NETTYPE=$2
+    local TENANTNUM=$3
+    local NETNUMBER=$4
+    local HOSTNUMBER=$5
+    local VLANID=$6
+    local TENANTMTU=$7
+    local INTERNALMTU=$8
+    local FILE=$9
+    local IFNAME1="${DEFAULT_IF1}"
+    local IFNAME2="${DEFAULT_IF2}"
+
+    # Initially, just worry about enabling login
+    # TODO: Add networking at a later date
+
+    cat << EOF >> ${FILE}
+      user_data_format: 'RAW'
+      user_data:
+        Fn::Base64:
+          Fn::Replace:
+          - 'OS::stack_name': {Ref: 'OS::stack_name'}
+          - |
+            #cloud-config
+            chpasswd:
+              list: |
+                root:root
+                centos:centos
+              expire: False
+            ssh_pwauth: True
+EOF
+
+    return 0
+}
+
+## Create a per-VM userdata file to setup the networking in the guest
+## according to the NETWORKING_TYPE variable.
+##
+function append_heat_userdata {
+    local VMNAME=$1
+    local VMTYPE=$2
+    local NETTYPE=$3
+    local TENANTNUM=$4
+    local NETNUMBER=$5
+    local HOSTNUMBER=$6
+    local VLANID=$7
+    local TENANTMTU=$8
+    local INTERNALMTU=$9
+    local FILE=${10}  # fixed: $10 parses as ${1}0; braces are required past $9
+    local IMAGE=${11}  # fixed: $11 parses as ${1}1, so the centos branch below could never trigger
+
+    if [ "x${IMAGE}" == "xcentos" -o "x${IMAGE}" == "xcentos_raw" ]; then
+        if [ "x${NETWORKING_TYPE}" == "xlayer3" ]; then
+            append_layer3_heat_centos_userdata ${VMNAME} ${NETTYPE} ${TENANTNUM} ${NETNUMBER} ${HOSTNUMBER} ${VLANID} ${TENANTMTU} ${INTERNALMTU} ${FILE}
+        else
+            append_layer2_heat_centos_userdata ${VMNAME} ${NETTYPE} ${TENANTNUM} ${NETNUMBER} ${HOSTNUMBER} ${VLANID} ${TENANTMTU} ${INTERNALMTU} ${FILE}
+        fi
+    else
+        if [ "x${NETWORKING_TYPE}" == "xlayer3" ]; then
+            append_layer3_heat_userdata ${VMNAME} ${VMTYPE} ${NETTYPE} ${TENANTNUM} ${NETNUMBER} ${HOSTNUMBER} ${VLANID} ${TENANTMTU} ${INTERNALMTU} ${FILE}
+        else
+            append_layer2_heat_userdata ${VMNAME} ${VMTYPE} ${NETTYPE} ${TENANTNUM} ${NETNUMBER} ${HOSTNUMBER} ${VLANID} ${TENANTMTU} ${INTERNALMTU} ${FILE}
+        fi
+    fi
+
+    return 0
+}
+
+## Setup the image arguments for the nova boot command according to whether
+## the user wants to use cinder volumes or glance images. 
+## +function create_image_args { + local VMNAME=$1 + local IMAGE=$2 + local BOOT_SOURCE=$3 + + if [ "x${BOOT_SOURCE}" == "xglance" ]; then + echo "--image=${IMAGE}" + else + # Added cinder id to launch file so now just use \$CINDER_ID + echo "--block-device-mapping vda=\${CINDER_ID}:::0" + fi + return 0 +} + +## Create a file to later add boot commands +## +function create_boot_command_file { + local TENANT=$1 + local VMNAME=$2 + local RESULT=$3 + local FILE=${BOOTDIR}/launch_${VMNAME}.sh + + cat << EOF > ${FILE} +#!/bin/bash +# +source ${HOME}/openrc.${TENANT} +EOF + chmod 755 ${FILE} + + eval "$RESULT=${FILE}" + return 0 +} + +## Create a file to later add yaml statements +## +function create_heat_yaml_file { + local TENANT=$1 + local VMNAME=$2 + local RESULT=$3 + local FILE=${BOOTDIR}/heat_${VMNAME}.yaml + + cat << EOF > ${FILE} +heat_template_version: 2013-05-23 + +description: > + Creates specified VMs from lab_setup.sh + +parameters: + +resources: + +EOF + chmod 755 ${FILE} + + eval "$RESULT=${FILE}" + return 0 +} + +## Add Heat Parameters to a file +## +#function write_heat_parameter_commands +## Add Heat resources to a file +## +function write_heat_resource_commands { + local VMNAME=${1} + local VOLNAME=${2} + local IMAGE=${3} + local BOOT_SOURCE=${4} + local SIZE=${5} + local FLAVOR=${6} + local FLAVOR_MODIFIER=${7} + local IP=${8} + local MGMTNETID=${9} + local MGMTVIF=${10} + local TENANTNETID=${11} + local TENANTVIF=${12} + local TENANTIP=${13} + local INTERNALNETID=${14} + local INTERNALVIF=${15} + local FILE=${16} + + VMNAME_UNDERSCORES=$(echo "${VMNAME}" | sed -e 's/-/_/g') + + local GLANCE_ID=$(get_glance_id ${IMAGE}) + if [ -z "${GLANCE_ID}" ]; then + echo "No glance image with name: ${IMAGE}" + return 1 + fi + + if [ "x${BOOT_SOURCE}" == "xglance" ]; then + + cat << EOF > ${FILE} + + ${VMNAME_UNDERSCORES}: + type: OS::Nova::Server + properties: + name: ${VMNAME} + flavor: ${FLAVOR} + image: ${IMAGE} + networks: + - {uuid: ${MGMTNETID}, vif-model: 
${MGMTVIF} } + - {uuid: ${TENANTNETID}, fixed_ip: ${TENANTIP}, vif-model: ${TENANTVIF} } + - {uuid: ${INTERNALNETID}, vif-model: ${INTERNALVIF} } +EOF + + else + VOLNAME_UNDERSCORES=$(echo "${VOLNAME}" | sed -e 's/-/_/g') + + cat << EOF > ${FILE} + + ${VOLNAME_UNDERSCORES}: + type: OS::Cinder::Volume + properties: + name: heat_vol_${VOLNAME} + image: ${IMAGE} + size: ${SIZE} + + ${VMNAME_UNDERSCORES}: + type: OS::Nova::Server + properties: + name: ${VMNAME} + flavor: ${FLAVOR} + block_device_mapping: + - {device_name: vda, volume_id: { get_resource: ${VOLNAME_UNDERSCORES} } } + networks: + - {uuid: ${MGMTNETID}, vif-model: ${MGMTVIF} } + - {uuid: ${TENANTNETID}, fixed_ip: ${TENANTIP}, vif-model: ${TENANTVIF} } + - {uuid: ${INTERNALNETID}, vif-model: ${INTERNALVIF} } +EOF + fi + + return 0 +} + + +## Add volume creation to launch file +## +function write_cinder_command { + local NAME=$1 + local IMAGE=$2 + local SIZE=$3 + local BOOT_SOURCE=$4 + local FILE=$5 + + # Don't do anything if using glance instead of cinder + if [ "x${BOOT_SOURCE}" == "xglance" ]; then + return 0 + fi + + local GLANCE_ID=$(get_glance_id ${IMAGE}) + if [ -z "${GLANCE_ID}" ]; then + echo "No glance image with name: ${IMAGE}" + return 1 + fi + + cat << EOF >> ${FILE} +# Allow disk size override for testing +SIZE=\${3:-${SIZE}} + +CINDER_ID=\$(cinder ${REGION_OPTION} list | grep "vol-${NAME} " | awk '{print \$2}') +if [ -z "\${CINDER_ID}" ]; then + cinder ${REGION_OPTION} create --image-id ${GLANCE_ID} --display-name=vol-${NAME} \${SIZE} + RET=\$? 
+ if [ \${RET} -ne 0 ]; then + echo "Failed to create cinder volume 'vol-${NAME}'" + exit + fi + + # Wait up to one minute for the volume to be created + echo "Creating volume 'vol-${NAME}'" + DELAY=0 + while [ \$DELAY -lt ${CINDER_TIMEOUT} ]; do + STATUS=\$(cinder ${REGION_OPTION} show vol-${NAME} 2>/dev/null | awk '{ if (\$2 == "status") {print \$4} }') + if [ \${STATUS} == "downloading" -o \${STATUS} == "creating" ]; then + DELAY=\$((DELAY + 5)) + sleep 5 + elif [ \${STATUS} == "available" ]; then + break + else + echo "Volume Create Failed" + exit + fi + done + + if [ \${STATUS} == "available" ]; then + echo "Volume Created" + else + echo "Timed out waiting for volume creation" + fi +fi +CINDER_ID=\$(cinder ${REGION_OPTION} show vol-${NAME} 2>/dev/null | awk '{ if (\$2 == "id") {print \$4} }') + +EOF + return 0 +} + +## Append commands to create vlan trunks to boot scripts +## +function write_trunk_commands { + local FILE=$1 + local INSTANCE=$2 + local INSTANCE_VLANID=$3 + + cat << EOF >> ${FILE} +VMNAME=$INSTANCE +NIC_INDEX=0 +nova ${REGION_OPTION} show \$VMNAME|grep nic| while read -r NIC; do + PARENTPORT=\`echo \$NIC|egrep -o '"port_id": "[[:alnum:]-]*"'|egrep -o "[[:alnum:]-]*"|tail -n 1\` + PARENTMAC=\`echo \$NIC|egrep -o '"mac_address": "[[:alnum:]:]*"'|egrep -o "[[:alnum:]:]*"|tail -n 1\` + PARENTNETWORK=\`echo \$NIC|egrep -o '"network": "[[:alnum:]-]*"'|egrep -o "[[:alnum:]-]*"|tail -n 1\` + + SUBPORT_INDEX=0 + for VLAN_NETWORK in \`openstack ${REGION_OPTION} network list -c Name|grep "\${PARENTNETWORK}-"\`; do + VLAN_NETWORK_NAME=\`echo \$VLAN_NETWORK |egrep -o "[[:alnum:]-]*net[[:alnum:]]*-[[:alnum:]]*"\` + if [ "x\$VLAN_NETWORK_NAME" == "x" ]; then + continue + fi + VLANID=\`echo \$VLAN_NETWORK|egrep -o "\-[0-9]"|tail -n 1|egrep -o "[0-9]"\` + if [ "\$VLANID" -ne "$INSTANCE_VLANID" ]; then + continue + fi + if [ "\$SUBPORT_INDEX" -eq "0" ]; then + openstack ${REGION_OPTION} network trunk create \$VMNAME-trunk\$NIC_INDEX --parent-port \$PARENTPORT + 
fi + openstack ${REGION_OPTION} port create --network \$VLAN_NETWORK_NAME \$VMNAME-trunk\$NIC_INDEX-port\$SUBPORT_INDEX + echo "source /etc/platform/openrc; openstack ${REGION_OPTION} port set \$VMNAME-trunk\$NIC_INDEX-port\$SUBPORT_INDEX --mac-address \$PARENTMAC"|bash + openstack ${REGION_OPTION} network trunk set \$VMNAME-trunk\$NIC_INDEX --subport port=\$VMNAME-trunk\$NIC_INDEX-port\$SUBPORT_INDEX,segmentation-type=vlan,segmentation-id=\$VLANID + SUBPORT_INDEX=\$((SUBPORT_INDEX+1)) + done + NIC_INDEX=\$((NIC_INDEX+1)) +done +EOF +} + +## Write the bash commands to launch a VM and assign a floating IP +## +function write_boot_command { + local CMD=$1 + local VMNAME=$2 + local FLAVOR=$3 + local FLAVOR_MODIFIER=$4 + local FIPID=$5 + local FILE=$6 + local VLANID=$7 + + cat << EOF >> ${FILE} +FLAVOR=\${1:-${FLAVOR}} +FLAVOR_MODIFIER=\${2:-${FLAVOR_MODIFIER}} + +if [ ! -z \${FLAVOR_MODIFIER} ]; then + FLAVOR_MODIFIER=".\${FLAVOR_MODIFIER}" +fi + +INFO=\$(nova ${REGION_OPTION} show ${VMNAME} &>> /dev/null) +RET=\$? +if [ \${RET} -ne 0 ]; then + ${CMD} + RET=\$? 
+EOF + if [ "${VLAN_TRANSPARENT_INTERNAL_NETWORKS}" == "False" ]; then + write_trunk_commands ${FILE} ${VMNAME} ${VLANID} + fi + + cat << EOF >> ${FILE} +fi +EOF + if [ "x${FLOATING_IP}" == "xyes" ]; then + cat << EOF >> ${FILE} +if [ \${RET} -eq 0 ]; then + FIXED_ADDRESS= + PORT_ID= + RETRY=30 + while [ -z "\${FIXED_ADDRESS}" -a \${RETRY} -ne 0 ]; do + FIXED_ADDRESS=\$(echo \${INFO} | sed -e 's#^.*192.168#192.168#' -e 's#[, ].*##g') + PORT_ID=\$(echo "\${INFO}" | grep "nic.*mgmt-net" | grep -Eo "[0-9a-zA-Z]{8}-[0-9a-zA-Z]{4}-[0-9a-zA-Z]{4}-[0-9a-zA-Z]{4}-[0-9a-zA-Z]{12}") + if [ -z \${FIXED_ADDRESS} ]; then + sleep 2 + INFO=\$(nova ${REGION_OPTION} show ${VMNAME}) + fi + RETRY=\$((RETRY-1)) + done + if [ -z \${FIXED_ADDRESS} ]; then + echo "Could not determine fixed address of ${VMNAME}" + exit 1 + fi + openstack ${REGION_OPTION} floating ip set ${FIPID} --port \${PORT_ID} --fixed-ip-address \${FIXED_ADDRESS} + RET=\$? + +fi + +exit \${RET} +EOF + + else + cat << EOF >> ${FILE} +exit \${RET} +EOF + + fi + + return 0 +} + +## Output all wrapper scripts that do not have variable content. 
+function create_heat_script_files { + info "Writing Heat Scripts to: ${HEATSCRIPT}" + + ## The global file runs each individual tenant file + GLOBAL_FILENAME=${HEATSCRIPT} + echo "#!/bin/bash -e" > ${GLOBAL_FILENAME} + + for TENANT in ${TENANTS[@]}; do + ## The tenant file runs each VMTYPE file for the tenant + TENANT_FILENAME=${BOOTDIR}/heat_${TENANT}.sh + TENANT_HEAT_NAME=heat_${TENANT}-instances + echo "#!/bin/bash -e" > ${TENANT_FILENAME} + echo "source ${HOME}/openrc.${TENANT}" >> ${TENANT_FILENAME} + echo "heat stack-create -f ${BOOTDIR}/${TENANT_HEAT_NAME}.yaml ${TENANT_HEAT_NAME}" >> ${TENANT_FILENAME} + echo "exit 0" >> ${TENANT_FILENAME} + chmod 755 ${TENANT_FILENAME} + + for INDEX in ${!APPTYPES[@]}; do + APPTYPE=${APPTYPES[${INDEX}]} + VMTYPE=${VMTYPES[${INDEX}]} + ## The tenant APPTYPE file runs one VMTYPE for the tenant + APPTYPE_FILENAME=${BOOTDIR}/heat_${TENANT}-${VMTYPE}-instances.sh + APPTYPE_HEAT_NAME=heat_${TENANT}-${VMTYPE}-instances + echo "#!/bin/bash -e" > ${APPTYPE_FILENAME} + echo "source ${HOME}/openrc.${TENANT}" >> ${APPTYPE_FILENAME} + echo "heat stack-create -f ${BOOTDIR}/${APPTYPE_HEAT_NAME}.yaml ${APPTYPE_HEAT_NAME}" >> ${APPTYPE_FILENAME} + echo "exit 0" >> ${APPTYPE_FILENAME} + chmod 755 ${APPTYPE_FILENAME} + done + + echo "${TENANT_FILENAME}" >> ${GLOBAL_FILENAME} + done + + echo "exit 0" >> ${GLOBAL_FILENAME} + chmod 755 ${GLOBAL_FILENAME} + + for INDEX in ${!APPTYPES[@]}; do + APPTYPE=${APPTYPES[${INDEX}]} + VMTYPE=${VMTYPES[${INDEX}]} + ## Then APPTYPE file runs all APPTYPE files for all tenants + APPTYPE_FILENAME=${BOOTDIR}/heat_${VMTYPE}_instances.sh + echo "#!/bin/bash -e" > ${APPTYPE_FILENAME} + + for TENANT in ${TENANTS[@]}; do + APPTYPE_HEAT_NAME=heat_${TENANT}-${VMTYPE}-instances + echo "${BOOTDIR}/${APPTYPE_HEAT_NAME}.sh" >> ${APPTYPE_FILENAME} + done + + echo "exit 0" >> ${APPTYPE_FILENAME} + chmod 755 ${APPTYPE_FILENAME} + done + + return 0 +} + + +## Setup local storage on a single node +## +function 
setup_local_storage {
+    local NODE=$1
+    local MODE=$2
+    local PVS=$3
+    local LV_CALC_MODE=$4
+    local LV_SIZE=$5
+
+    if is_stage_complete "local_storage" ${NODE}; then
+        info "Skipping local storage configuration for ${NODE}; already done"
+        return 0
+    fi
+
+    # Special Case: Small system controller-0 (after running lab_cleanup)
+    if [[ "${SMALL_SYSTEM}" == "yes" && "${NODE}" == "controller-0" ]]; then
+        local NOVA_VG=$(system host-lvg-list ${NODE} ${CLI_NOWRAP} | awk '{if ($4 == "nova-local" && $6 == "provisioned") print $4}')
+        if [ "${NOVA_VG}" == "nova-local" ]; then
+            info "Skipping local storage configuration for ${NODE}; already done"
+            return 0
+        fi
+    fi
+
+    # Validate parameters: mode
+    if [[ ${MODE} == local* || "${MODE}" == "remote" ]]; then
+        info "Adding nova storage; Instance disks backed by ${MODE} storage for ${NODE}."
+    else
+        echo "ERROR: mode storage setting for ${NODE} is unknown: ${MODE}"
+        return 3
+    fi
+
+    # Validate parameters: make sure that at least one physical volume will be added
+    if [[ "${PVS}" == "none" ]]; then
+        echo "ERROR: No physical volumes are specified for ${NODE}"
+        return 3
+    fi
+
+
+    # Validate parameters: lv_calc_mode and lv_fixed_size
+    if [ "${LV_CALC_MODE}" == "fixed" ]; then
+        if [[ "${MODE}" == "local_lvm" && "${LV_SIZE}" == "0" ]]; then
+            echo "ERROR: lv_fixed_size storage setting for ${NODE} is invalid: ${LV_SIZE}"
+            return 3
+        fi
+    else
+        echo "ERROR: lv_calc_mode storage setting for ${NODE} is unknown: ${LV_CALC_MODE}"
+        return 3
+    fi
+
+    # Volume Group: Create the nova-local volume group
+    local NOVA_VG=$(system host-lvg-list ${NODE} ${CLI_NOWRAP} | awk '{if ($4 == "nova-local" && ($6 == "provisioned" || $6 ~ /adding/)) print $4}')
+    if [ -z "${NOVA_VG}" ]; then
+        log_command "system host-lvg-add ${NODE} nova-local"
+    fi
+
+    # Physical Volumes: Process the devices (disks and/or partitions)
+    if [ "${PVS}" != "none" ]; then
+        local DEVICE_ARRAY=(${PVS//,/ })
+        for DINDEX in ${!DEVICE_ARRAY[@]}; do
+            local 
DEVICE_UUID=$(system host-disk-list ${NODE} ${CLI_NOWRAP} | grep ${DEVICE_ARRAY[${DINDEX}]} | awk '{print $2}')
+
+            if [ "${DEVICE_UUID}" == "" ]; then
+                log "PV is not a disk, looking for a partition"
+                local DEVICE_UUID=$(system host-disk-partition-list ${NODE} ${CLI_NOWRAP} | grep ${DEVICE_ARRAY[${DINDEX}]} | awk '{print $2}')
+                if [ "${DEVICE_UUID}" == "" ]; then
+                    echo "ERROR: could not find the device (${DEVICE_ARRAY[${DINDEX}]}) UUID for ${NODE}"
+                    return 4
+                fi
+            fi
+
+            # Add the additional physical volume
+            local NOVA_PV=$(system host-pv-list ${NODE} ${CLI_NOWRAP} | grep nova-local | grep ${DEVICE_UUID} | awk '{if ($12 == "provisioned" || $12 == "adding") print $4}')
+            if [ -z ${NOVA_PV} ];then
+                log_command "system host-pv-add ${NODE} nova-local ${DEVICE_UUID}"
+            fi
+        done
+    fi
+
+    # Set instances LV parameter
+    if [ "${MODE}" == "local_lvm" ]; then
+        log_command "system host-lvg-modify ${NODE} nova-local -b lvm -s ${LV_SIZE}"
+    elif [ "${MODE}" == "local_image" ]; then
+        log_command "system host-lvg-modify ${NODE} nova-local -b image"
+    elif [ "${MODE}" == "remote" ]; then
+        log_command "system host-lvg-modify ${NODE} nova-local -b remote"
+    fi
+
+    stage_complete "local_storage" ${NODE}
+
+    return 0
+}
+
+
+## Setup local storage on nodes
+##
+function add_local_storage {
+    local NODE=""
+
+    for NODE in ${NODES}; do
+        if [[ "${SMALL_SYSTEM}" != "yes" && $NODE == *"controller"* ]]; then
+            continue
+        fi
+        local SETTINGS_STRING=$(get_node_variable ${NODE} LOCAL_STORAGE)
+        local FIELD_SEPARATORS="${SETTINGS_STRING//[^|]}"
+        if [ ${#FIELD_SEPARATORS} -ne 3 ]; then
+            echo "Local storage settings for host ${NODE} has an invalid format: Incorrect number of fields"
+            echo "  ${SETTINGS_STRING}"
+            return 2
+        fi
+
+        local SETTINGS_ARRAY=(${SETTINGS_STRING//|/ })
+        if [ ${#SETTINGS_ARRAY[@]} == 4 ]; then
+            setup_local_storage ${NODE} ${SETTINGS_ARRAY[@]}
+            RET=$? 
+ if [ ${RET} -ne 0 ]; then + echo "Failed to setup local storage for ${NODE}" + return ${RET} + fi + else + echo "Local storage settings for host ${NODE} has an invalid format: One or more fields are empty" + echo " ${SETTINGS_STRING}" + return 2 + fi + done + + return 0 +} + + +## Setup cinder device on controller +## +function setup_cinder_device { + local NODE=$1 + local CINDER_SETTINGS=$2 + + if is_stage_complete "cinder_device" ${NODE}; then + info "Skipping cinder device creation for ${NODE}; already done" + return 0 + fi + + # Special case: after lab_cleanup we may already have cinder provisioned. + local CINDER_VG=$(system host-lvg-list ${NODE} ${CLI_NOWRAP} | awk '{if ($4 == "cinder-volumes") print $4}') + if [[ "${CINDER_VG}" == *"cinder-volumes"* ]]; then + info "Skipping cinder device creation for ${NODE}; already done" + return 0 + fi + + info "Adding cinder device for ${NODE}" + + # Validate config + for DEV_CFG_STR in ${CINDER_SETTINGS}; do + local DEV_CFG_ARRAY=(${DEV_CFG_STR//|/ }) + if [ ${#DEV_CFG_ARRAY[@]} -ne 1 ]; then + echo "Cinder device settings for host ${NODE} has an invalid format: Incorrect number of fields" + echo " ${DEV_CFG_STR}" + return 1 + fi + done + + for DEV_CFG_STR in ${CINDER_SETTINGS}; do + local DEV_CFG_ARRAY=(${DEV_CFG_STR//|/ }) + PARTITION=${DEV_CFG_ARRAY[0]} + + PARTITIONS=$(system host-disk-partition-list ${NODE} ${CLI_NOWRAP}) + PARTITION_UUID=$(echo "${PARTITIONS}" | grep ${PARTITION} | awk '{print $2}') + if [ -z "${PARTITION_UUID}" ]; then + echo "No device named ${PARTITION} on ${NODE}" + return 2 + fi + + local CINDER_VG=$(system host-lvg-list ${NODE} ${CLI_NOWRAP} | awk '{if ($4 == "cinder-volumes" && ($6 == "provisioned" || $6 ~ /adding/)) print $4}') + if [ -z "${CINDER_VG}" ]; then + log_command "system host-lvg-add ${NODE} cinder-volumes" + fi + + CMD="system host-pv-add ${NODE} cinder-volumes ${PARTITION_UUID}" + log_command "${CMD}" + done + + stage_complete "cinder_device" ${NODE} + + return 0 +} + +## 
Partition Settings.
+##
+## _PARTITIONS=",[partition1_size_gib, partition2_size_gib]|,[partition1_size_gib]"
+##
+## Examples:
+##  - create three partitions on controller-0:
+##    -> two 144 GiB partitions on /dev/disk/by-path/pci-0000:00:17.0-ata-3.0
+##    -> one 128 GiB partition on /dev/disk/by-path/pci-0000:00:17.0-ata-2.0
+##    CONTROLLER0_PARTITIONS="/dev/disk/by-path/pci-0000:00:17.0-ata-3.0,[144,144]|/dev/disk/by-path/pci-0000:00:17.0-ata-2.0,[128]"
+##
+##  - create one partition on worker-1:
+##    -> one 419 GiB partition on /dev/disk/by-path/pci-0000:00:1f.2-ata-2.0
+##    WORKER1_PARTITIONS="/dev/disk/by-path/pci-0000:00:1f.2-ata-2.0,[419]"
+function setup_partitions {
+    NODE=$1
+    DISK_DEVICE_PATH=$2
+    # this is how you get a parameter that is passed as an array
+    declare -a partitionArgArray=("${!3}")
+
+    # 180 but longer in vbox. That's because there is another set of manifests already applying.
+    # The partition goes to "Ready" eventually but lab_setup timeout.
+    local PARTITION_CREATE_TIMEOUT=360
+
+    local DISK_UUID=""
+
+    # Disk search up to 10 times with 10 seconds between retries
+    for ((loop=0;loop<10;loop++)); do
+        # Get the disk UUID for the provided disk device path.
+        DISK_UUID=$(system host-disk-list ${NODE} ${CLI_NOWRAP} | grep ${DISK_DEVICE_PATH} | awk '{print $2}')
+
+        # If we couldn't obtain the disk UUID then retry.
+        if [ "${DISK_UUID}" != "" ]; then
+            break
+        else
+            sleep 10
+        fi
+    done
+
+    # If we couldn't obtain the disk UUID, return with Error.
+    if [ "${DISK_UUID}" == "" ]; then
+        echo "ERROR: could not find the disk (${DISK_DEVICE_PATH}) for ${NODE}"
+        return 4
+    fi
+
+    # Get the sizes of partitions existing on the current disk. 
+ local present_partition_sizes=$(system host-disk-partition-list ${NODE} --disk ${DISK_UUID} ${CLI_NOWRAP} | grep "ba5eba11" | awk -F'|' '{print $7}' | tr '\r\n' ' ') + PARTITION_SIZES=$(echo ${partitionArgArray[@]}) + + + # Remove present partition sizes from the requested partition sizes (in case this is after lab_cleanup). + if [[ -n "${present_partition_sizes}" ]]; then + PARTITION_SIZES_COPY=",${PARTITION_SIZES// /,}," + log "We have other partitions present, determine which we still need to create." + for present_size in ${present_partition_sizes}; do + # lab_setup script uses int values for partition sizes, + # while sysinv reports them as float values, so in order + # to properly compare them, we turn the sysinv ones into + # integer values. + int_present_size=${present_size%.*} + PARTITION_SIZES_COPY=${PARTITION_SIZES_COPY/,$int_present_size,/,} + done + + PARTITION_SIZES=${PARTITION_SIZES_COPY//,/ } + fi + + if [[ -z "${PARTITION_SIZES-}" ]]; then + log "No new partitions to add, return.." + return 0 + fi + + invprovision=$(system host-show ${NODE} | grep "invprovision" | awk '{print $4;}') + + # Create all the requested partitions. + for PARTITION_SIZE in ${PARTITION_SIZES}; do + log "Create part of size $PARTITION_SIZE on node $NODE, disk $DISK_DEVICE_PATH" + local new_partition=$(system host-disk-partition-add -t lvm_phys_vol ${NODE} ${DISK_UUID} ${PARTITION_SIZE}) + local new_partition_uuid=$(echo $new_partition | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}') + + if [ "${new_partition_uuid}" == "" ]; then + echo "ERROR: Could not create partition of ${PARTITION_SIZE} GiB on disk ${DISK_UUID} for ${NODE}" + return 1 + fi + + # If this is controller-0 or the host has already been provisioned, create the partition in-service, so wait for it to become Ready. 
+ if [[ $NODE == *"controller-0"* || $invprovision == "provisioned"* ]]; then + local PARTITION_DELAY=0 + while [[ $PARTITION_DELAY -lt $PARTITION_CREATE_TIMEOUT ]]; do + partition_status=$(system host-disk-partition-list ${NODE} ${CLI_NOWRAP} | grep ${new_partition_uuid} | awk '{print $(NF-1)}') + if [[ ${partition_status} != *"Ready"* ]]; then + log "Waiting for partition ${new_partition_uuid} to become Ready. Status: ${partition_status}" + sleep 5 + PARTITION_DELAY=$((PARTITION_DELAY + 5)) + else + log "Partition ${new_partition_uuid} is Ready" + break + fi + done + + if [[ $PARTITION_DELAY -eq $PARTITION_CREATE_TIMEOUT ]]; then + echo "ERROR: Timed out waiting for partition ${new_partition_uuid} to become Ready" + return 1 + fi + fi + done + + return 0 +} + +## Extend cgts-vg - AIO only. +## +## Use the mentioned devices (disks/partitions) for Physical Volumes to extend cgts-vg: +## _CGTS_STORAGE="device_path_1|device_path_2|device_path_3" +## +## Examples: +## - extend controller-0's cgts-vg with one disk based PV and one partition based PV: +## CONTROLLER0_CGTS_STORAGE="/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0|/dev/disk/by-path/pci-0000:00:0d.0-ata-1.0-part6" +## +## - extend controller-1's cgts-vg with one partition based PV: +## CONTROLLER1_CGTS_STORAGE="/dev/disk/by-path/pci-0000:00:0d.0-ata-1.0-part5" +function setup_cgts_extend { + NODE=$1 + DEVICES=$2 + + info "Extending cgts-vg for host $NODE" + + local PROVISION_PV_TIMEOUT=180 + + for DEVICE in "${DEVICES[@]}"; do + # Check if this PV is already in cgts-vg. + # Special case: after lab_cleanup we may already have this PV in cgts-vg. 
+ local CGTS_VG=$(system host-pv-list $NODE ${CLI_NOWRAP} | grep $DEVICE | awk -F'|' '{print $9}') + if [[ "${CGTS_VG}" == *"cgts-vg"* ]]; then + log "Skipping adding device $DEVICE to cgts-vg for ${NODE}; already done" + continue + fi + + if [[ $NODE == *"worker"* ]]; then + log_command "system host-lvg-add $NODE cgts-vg" + fi + + log_command "system host-pv-add $NODE cgts-vg $DEVICE" + + if [[ $NODE != *"controller-0"* ]]; then + log "$NODE is not controller-0, don't wait for extending cgts-vg" + continue + fi + + local PROVISION_PV_DELAY=0 + while [[ $PROVISION_PV_DELAY -lt $PROVISION_PV_TIMEOUT ]]; do + pv_state=$(system host-pv-list $NODE ${CLI_NOWRAP} | grep $DEVICE | awk '{print $12;}') + if [[ ${pv_state} != *"provisioned"* ]]; then + log "Waiting for PV ${DEVICE} to become Ready. Status: ${pv_state}" + sleep 5 + PROVISION_PV_DELAY=$((PROVISION_PV_DELAY + 5)) + else + log "PV ${DEVICE} is provisioned" + break + fi + + if [[ $PROVISION_PV_DELAY -eq $PROVISION_PV_TIMEOUT ]]; then + echo "ERROR: Timed out waiting for PV ${DEVICE} to be provisioned" + return 1 + fi + done + done + + return 0 +} + +function add_partitions { + local NODE="" + local PART_NODES=$(system host-list ${CLI_NOWRAP} | awk '{if (($6=="controller" || $6=="worker") && ($12 != "offline")) print $4;}') + + for NODE in ${PART_NODES}; do + # We should never skip this as we may want to add + # additional partitions at a later date. + # Useful for chaining config files. + info "Setting partitions for host $NODE" + + local SETTINGS_STRING=$(get_node_variable ${NODE} PARTITIONS) + local FIELD_SEPARATORS="${SETTINGS_STRING//[^|]}" + local SETTINGS_ARRAY=(${SETTINGS_STRING//|/ }) + unset MERGE_PARTITION_INFO + declare -A MERGE_PARTITION_INFO + + # When chaining multiple config files, the same node may contain + # partition info that is not all in one place. 
Example: + # {NODE}_PARTITIONS="{disk1},[1,2,3]|{disk2},[3,4,5]|{disk1},[1,5,6]" + # This will merge then into: + # {NODE}_PARTITIONS="{disk1},[1,2,3,1,5,6]|{disk2},[3,4,5]" + if [ ${#SETTINGS_ARRAY[@]} -ne 0 ]; then + for DISK_PART_SETTING in "${SETTINGS_ARRAY[@]}"; do + local DISK_PART_SETTING_ARRAY=(${DISK_PART_SETTING//[\]\[,]/ }) + local PARTITION_SIZES=("${DISK_PART_SETTING_ARRAY[@]:1}") + local DISK_DEVICE_PATH=${DISK_PART_SETTING_ARRAY[0]} + + set +u + if [ -z "${MERGE_PARTITION_INFO[${DISK_DEVICE_PATH}]}" ]; then + MERGE_PARTITION_INFO[$DISK_DEVICE_PATH]="${PARTITION_SIZES[@]}" + else + MERGE_PARTITION_INFO["${DISK_DEVICE_PATH}"]="${MERGE_PARTITION_INFO["${DISK_DEVICE_PATH}"]} ${PARTITION_SIZES[@]}" + fi + set -u + done + fi + + for disk_key in "${!MERGE_PARTITION_INFO[@]}"; do + P_SIZES=(${MERGE_PARTITION_INFO[$disk_key]}) + setup_partitions ${NODE} ${disk_key} P_SIZES[@] + RET=$? + if [[ ${RET} -ne 0 ]]; then + echo "ERROR: Could not create all the partitions for host ${NODE}" + return ${RET} + fi + done + + done + + return 0 +} + + +function extend_cgts_vg { + if [[ "${SMALL_SYSTEM}" != "yes" && "${K8S_ENABLED}" != "yes" ]]; then + log "This is not an AIO setup, we don't extend cgts-vg, so return." 
+ return + fi + + local NODE="" + local ALL_NODES=$(system host-list ${CLI_NOWRAP} | awk '{if (($6=="controller" || $6=="worker") && ($12 != "offline")) print $4;}') + + for NODE in ${ALL_NODES}; do + if is_stage_complete "extend_cgts_vg" ${NODE}; then + info "Skipping cgts-vg extend for ${NODE}; already done" + continue + fi + + local SETTINGS_STRING=$(get_node_variable ${NODE} CGTS_STORAGE) + local FIELD_SEPARATORS="${SETTINGS_STRING//[^|]}" + + local SETTINGS_ARRAY=(${SETTINGS_STRING//|/ }) + if [ ${#SETTINGS_ARRAY[@]} -ne 0 ]; then + local DEVICES=("${SETTINGS_ARRAY[@]:0}") + setup_cgts_extend ${NODE} ${DEVICES} + else + info "No cgts-vg extension for ${NODE}" + fi + + stage_complete "extend_cgts_vg" ${NODE} + done + + return 0 +} + +function wait_for_backend_configuration { + local BACKEND_TYPE=$1 + local BACKEND_NAME=$2 + local CINDER_CONFIGURED_TIMEOUT=600 + local CINDER_CONFIGURED_DELAY=0 + while [[ $CINDER_CONFIGURED_DELAY -lt $CINDER_CONFIGURED_TIMEOUT ]]; do + backend_info=$(system storage-backend-list ${CLI_NOWRAP} | grep ${BACKEND_NAME}) + backend_status=$(echo ${backend_info}| awk '{print $8}') + backend_task=$(echo ${backend_info}| awk '{print $10}') + + if [[ ${backend_status} == "configuration-failed" ]]; then + log "${BACKEND_NAME} backend (${BACKEND_NAME}): configuration failed" + return 1 + fi + + if [[ ${backend_status} != *"configured"* && ${backend_task} != *"reconfig-controller"* ]]; then + log "Waiting for ${BACKEND_TYPE} backend ${BACKEND_NAME} to become configured info: ${backend_info}" + sleep 10 + CINDER_CONFIGURED_DELAY=$((CINDER_CONFIGURED_DELAY + 10)) + else + log "${BACKEND_NAME} backend (${BACKEND_NAME}): configuration complete" + break + fi + done + + if [[ CINDER_CONFIGURED_DELAY -eq CINDER_CONFIGURED_TIMEOUT ]]; then + echo "ERROR: timed out waiting for ${BACKEND_NAME} backend (${BACKEND_NAME}) to become configured" + return 1 + fi +} + +function storage_backend_enable { + BACKEND_TYPE=$1 + if is_stage_complete 
"storage_backend_enable" ${BACKEND_TYPE}; then + info "Skipping storage backend ${BACKEND_TYPE} enabling; already done" + return 0 + else + info "Enabling ${BACKEND_TYPE} storage backend" + fi + + if [[ ${BACKEND_TYPE} == *"ceph"* ]]; then + if [ "$K8S_ENABLED" != "yes" ]; then + #TODO: Refactor services based on cinder & glance's backends + #TODO: Add nova as a service to use ceph ephemeral pool for + # remote instance-backing + STORAGE_SERVICES="cinder,glance,nova" + else + STORAGE_SERVICES="" + fi + BACKEND_NAME="ceph-store" + fi + + if [[ ${BACKEND_TYPE} == *"lvm"* ]]; then + STORAGE_SERVICES="cinder" + BACKEND_NAME="lvm-store" + fi + + if [[ ${BACKEND_TYPE} == *"external"* ]]; then + STORAGE_SERVICES="cinder" + BACKEND_NAME="shared_services" + fi + + # TODO: Support storage backend modifications on successive lab_setup executions + # (if backend is new use storage-backend-add otherwise use storage-backend-modify) + NAME="${BACKEND_TYPE}-store" + ID=$(system storage-backend-list ${CLI_NOWRAP} | awk -v NAME=${BACKEND_NAME} '{if ($4 == NAME) { print $2 }}') + if [ -z "${ID}" ]; then + if [[ ${STORAGE_SERVICES} == "" ]]; then + log_command "system storage-backend-add ${BACKEND_TYPE} ${STORAGE_CEPH_CAPABILITIES} --confirmed" + else + log_command "system storage-backend-add ${BACKEND_TYPE} ${STORAGE_CEPH_CAPABILITIES} -s ${STORAGE_SERVICES} --confirmed" + fi + else + if [[ ${BACKEND_TYPE} == *"external"* ]]; then + log_command "system storage-backend-modify ${BACKEND_NAME} -s cinder,glance" + NAME=${BACKEND_NAME} + fi + fi + + wait_for_backend_configuration $BACKEND_TYPE $NAME + RET=$? + if [ ${RET} -ne 0 ]; then + exit ${RET} + fi + + stage_complete "storage_backend_enable" ${BACKEND_TYPE} + + return 0 +} + + +function setup_neutron_service_parameters { + if is_stage_complete "neutron_service_param"; then + info "Skipping Neutron service parameter configuration; already done" + return 0 + fi + + info "Adding Neutron service parameter configuration" + + if [ ! 
-z "${NEUTRON_BASE_MAC}" ]; then + log_command "system service-parameter-add network default base_mac=${NEUTRON_BASE_MAC}" + fi + + if [ ! -z "${NEUTRON_DVR_BASE_MAC}" ]; then + log_command "system service-parameter-add network default dvr_base_mac=${NEUTRON_DVR_BASE_MAC}" + fi + + if [ ! -z "${NEUTRON_EXTENSION_DRIVERS}" ]; then + log_command "system service-parameter-add network ml2 extension_drivers=${NEUTRON_EXTENSION_DRIVERS}" + fi + + log_command "system service-parameter-apply network" + + # Wait for neutron-server to finish restarting + wait_for_service_parameters + + stage_complete "neutron_service_param" + return 0 +} + +function setup_odl_service_parameters { + if [ -z "${SDN_ODL_URL}" ]; then + echo "ERROR: SDN_ODL_URL configuration parameter must be defined" + return 1 + fi + + log_command "system service-parameter-add network ml2 mechanism_drivers=${SDN_ODL_MECHANISM_DRIVERS}" + log_command "system service-parameter-add network ml2_odl url=${SDN_ODL_URL}" + log_command "system service-parameter-add network ml2_odl username=${SDN_ODL_USERNAME}" + log_command "system service-parameter-add network ml2_odl password=${SDN_ODL_PASSWORD}" + log_command "system service-parameter-add network ml2_odl port_binding_controller=${SDN_ODL_PORT_BINDING_CONTROLLER}" + + if [ ! 
-z "${SDN_ODL_SERVICE_PLUGINS}" ]; then + log_command "system service-parameter-add network default service_plugins=${SDN_ODL_SERVICE_PLUGINS}" + fi + + if [ "${FORCE_METADATA}" == "yes" ]; then + log_command "system service-parameter-add network dhcp force_metadata=true" + fi + + log_command "system service-parameter-apply network" + + # Wait for neutron-server to finish restarting + wait_for_service_parameters + + return 0 +} + + +## Setup SDN service parameters +## +function setup_sdn_service_parameters { + if is_stage_complete "sdn_service_param"; then + info "Skipping SDN service parameter configuration; already done" + return 0 + fi + + info "Adding SDN service parameter configuration" + + if [ "${SDN_NETWORKING}" == "opendaylight" ]; then + setup_odl_service_parameters + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup opendaylight service parameters" + return ${RET} + fi + else + echo "ERROR: unsupported SDN_NETWORKING type of \"${SDN_NETWORKING}\"" + return 4 + fi + + stage_complete "sdn_service_param" + + return 0 +} + + +## Setup SFC service parameters +## +function setup_sfc_service_parameters { + if is_stage_complete "sfc_service_param"; then + info "Skipping SFC service parameter configuration; already done" + return 0 + fi + + info "Adding SFC service parameter configuration" + + log_command "system service-parameter-add network sfc sfc_drivers=${SFC_SFC_DRIVERS}" + log_command "system service-parameter-add network sfc flowclassifier_drivers=${SFC_FLOW_CLASSIFIER_DRIVERS}" + + + log_command "system service-parameter-apply network" + + stage_complete "sfc_service_param" + + return 0 +} + + +## Setup SDN controllers +## +function setup_sdn_controllers { + if [ -z "${SDN_CONTROLLERS}" ]; then + echo "ERROR: SDN_CONTROLLERS configuration parameter must be defined" + return 1 + fi + + if is_stage_complete "sdn_controllers"; then + info "Skipping SDN controllers configuration; already done" + return 0 + fi + + info "Adding SDN controllers" + + 
for CONTROLLER in ${SDN_CONTROLLERS}; do + local DATA=(${CONTROLLER//:/ }) + local TRANSPORT=${DATA[0]^^} + local HOST=${DATA[1]} + local PORT=${DATA[2]} + log_command "system sdn-controller-add -a ${HOST} -p ${PORT} -t ${TRANSPORT}" + done + + stage_complete "sdn_controllers" + + return 0 +} + +## Enable SDN +## +function enable_sdn { + if is_stage_complete "enable_sdn"; then + info "Skipping SDN enabling; already done" + return 0 + fi + + info "Enabling SDN" + log_command "system modify --sdn_enabled=true" + + stage_complete "enable_sdn" + return 0 +} + +## Setup Software Defined Networking (SDN) +## +function setup_sdn { + if [ "${SDN_ENABLED}" != "yes" ]; then + # SDN is not enabled + return 0 + fi + + source ${OPENRC} + + enable_sdn + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to enable SDN" + return ${RET} + fi + + setup_sdn_service_parameters + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup SDN service parameters" + return ${RET} + fi + + setup_sdn_controllers + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup SDN controllers" + return ${RET} + fi + + return 0 +} + +## Setup Service Function Chaining (SFC) +## +function setup_sfc { + if [ "${SFC_ENABLED}" != "yes" ]; then + # SFC is not enabled + return 0 + fi + + source ${OPENRC} + + setup_sfc_service_parameters + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup SDN service parameters" + return ${RET} + fi + + return 0 +} + +# Https Changes +function is_https_security_configuration { + if [ ! -z "${HTTPS_ENABLED}" ] ; then + # A value is set for https , could be true or false - + SYSTEM_HTTPS_ENABLED=$(system show | awk '{ if ($2 == "https_enabled") print $4;}'); + + if [ "${SYSTEM_HTTPS_ENABLED}" != "${HTTPS_ENABLED}" ]; then + return 0 + else + # configuration value in .conf file matches with system show value . + # no changes needed. + return 1 + fi + else + # Lab is not https enabled .conf file has value for HTTPS_ENABLED? 
+        return 1
+    fi
+
+}
+
+function setup_https_security_configuration {
+    set +e
+    is_https_security_configuration
+    RET=$?
+    if [ ${RET} -eq 0 ]; then
+        info "Changing HTTPS security configuration .."
+        source ${OPENRC}
+
+        if [ "$HTTPS_ENABLED" == "True" ]; then
+            log_command "system modify --https_enabled=true"
+        elif [ "$HTTPS_ENABLED" == "False" ]; then
+            log_command "system modify --https_enabled=false"
+        fi
+
+        RET=$?
+        if [ ${RET} -ne 0 ]; then
+            echo "Error changing HTTPS configuration $RET"
+            exit 0
+        fi
+
+        # Wait for puppet operations to be over
+        DELAY=0
+        while [ $DELAY -lt ${HTTPS_TIMEOUT} ]; do
+
+            CONFIG_APPLIED=$(system host-show controller-0 | awk '{ if ($2 == "config_applied") print $4;}');
+            CONFIG_TARGET=$(system host-show controller-0 | awk '{ if ($2 == "config_target") print $4;}');
+
+            ICONFIG_APPLIED=${CONFIG_APPLIED:(-35)}
+            ICONFIG_TARGET=${CONFIG_TARGET:(-35)}
+            if [ "${ICONFIG_APPLIED}" != ${ICONFIG_TARGET} ]; then
+                DELAY=$((DELAY + 5))
+                sleep 5
+            else
+                echo "HTTPS security configuration change is successful."
+                # Now check for a CA certificate entry in conf file . if it exists and
+                # if we are converting the lab from http to https , copy the CA certificate
+                # No need to copy certificate if the conversion is from https to http
+
+                if [ ! -z "${CERTIFICATE_FILENAME}" ] && [ "${HTTPS_ENABLED}" == "True" ] ; then
+                    # check if the file exists
+                    if [ -f "${CERTIFICATE_FILENAME}" ]; then
+                        if [ "$TPM_ENABLED" == "True" ]; then
+                            # This is the initial certificate setup. 
For TPM configurations, we will need
+                            # to do it again once the second controller node is up
+                            # Otherwise, the TPM configuration will be missing on the second controller
+                            # So we need to make a copy, since the install operation will delete the input file provided
+                            cp ${CERTIFICATE_FILENAME} copy_of_${CERTIFICATE_FILENAME}
+                            log_command "system certificate-install -m tpm_mode copy_of_${CERTIFICATE_FILENAME}"
+                        else
+                            log_command "system certificate-install ${CERTIFICATE_FILENAME}"
+                        fi
+                        RET=$?
+                        return ${RET}
+                    else
+                        echo "CA certificate (${CERTIFICATE_FILENAME}) does not exist. \
+                            Please install the CA certificate manually\
+                            using system certificate-install "
+                    fi
+                fi
+
+                return 0
+            fi
+        done
+    else
+        echo "HTTPS security configuration change is not required."
+        local CONTROLLER_NODES=$(system host-list ${CLI_NOWRAP} | awk '{if ($6=="controller" && ($12 != "offline")) print $4;}')
+        local COUNT=$(echo ${CONTROLLER_NODES} | wc -w)
+        if [ ${COUNT} -gt 1 ]; then
+            # now that the second controller is up, we need to install the CA certificate for https again if using TPM
+            if [ ! -z "${CERTIFICATE_FILENAME}" ] && [ "${HTTPS_ENABLED}" == "True" ] && [ "$TPM_ENABLED" == "True" ]; then
+                # check if the file exists
+                if [ -f "${CERTIFICATE_FILENAME}" ]; then
+                    log_command "system certificate-install -m tpm_mode ${CERTIFICATE_FILENAME}"
+                fi
+            fi
+
+        fi
+    fi
+
+    return 0
+}
+
+function setup_providernet_tenants_quota_crentials {
+    if [ "x${DISTRIBUTED_CLOUD_ROLE}" != "xsubcloud" ]; then
+        add_tenants
+        RET=$?
+        if [ ${RET} -ne 0 ]; then
+            echo "Failed to add tenants, ret=${RET}"
+            exit ${RET}
+        fi
+    fi
+
+    ## Cache the tenantid values
+    ADMINID=$(get_tenant_id admin)
+    TENANT1ID=$(get_tenant_id ${TENANT1})
+    TENANT2ID=$(get_tenant_id ${TENANT2})
+
+    create_credentials
+    RET=$?
+    if [ ${RET} -ne 0 ]; then
+        echo "Failed to create credentials, ret=${RET}"
+        exit ${RET}
+    fi
+
+    if [ "x${DISTRIBUTED_CLOUD_ROLE}" != "xsubcloud" ]; then
+        set_quotas
+        RET=$? 
+        if [ ${RET} -ne 0 ]; then
+            echo "Failed to set quotas, ret=${RET}"
+            exit ${RET}
+        fi
+    fi
+
+    if [ "x${DISTRIBUTED_CLOUD_ROLE}" != "xsubcloud" ]; then
+        if [ ${GROUPNO} -eq 0 ]; then
+            setup_flavors
+            RET=$?
+            if [ ${RET} -ne 0 ]; then
+                echo "Failed to setup flavors, ret=${RET}"
+                exit ${RET}
+            fi
+        fi
+
+        create_custom_flavors
+        RET=$?
+        if [ ${RET} -ne 0 ]; then
+            echo "Failed to create custom flavors, ret=${RET}"
+            exit ${RET}
+        fi
+    fi
+
+    if [ "x${DISTRIBUTED_CLOUD_ROLE}" != "xsubcloud" ]; then
+        setup_keys
+        RET=$?
+        if [ ${RET} -ne 0 ]; then
+            echo "Failed to setup keys, ret=${RET}"
+            exit ${RET}
+        fi
+    fi
+
+    return 0
+
+}
+
+## Wait for application apply to complete
+function wait_for_application_apply {
+    local APPLICATION_CONFIGURED_TIMEOUT=3600
+    local APPLICATION_CONFIGURED_DELAY=0
+    while [[ $APPLICATION_CONFIGURED_DELAY -lt $APPLICATION_CONFIGURED_TIMEOUT ]]; do
+        app_info=$(system application-list ${CLI_NOWRAP} | grep stx-openstack)
+        app_status=$(echo ${app_info}| awk '{print $8}')
+        #app_task=$(echo ${app_info}| awk '{print $10}')
+
+        if [[ ${app_status} == "apply-failed" ]]; then
+            echo "Application stx-openstack : apply failed"
+            return 1
+        fi
+
+        if [[ ${app_status} == *"applying"* ]]; then
+            log "Waiting for application to become applied info: stx-openstack"
+            sleep 10
+            APPLICATION_CONFIGURED_DELAY=$((APPLICATION_CONFIGURED_DELAY + 10))
+        else
+            log "stx-openstack application: apply complete"
+            break
+        fi
+    done
+
+    if [[ APPLICATION_CONFIGURED_DELAY -eq APPLICATION_CONFIGURED_TIMEOUT ]]; then
+        echo "ERROR: timed out waiting for stx-openstack to become applied"
+        return 1
+    fi
+    return 0
+}
+
+#Wait for application upload to complete
+function wait_for_application_upload {
+    local APPLICATION_CONFIGURED_TIMEOUT=600
+    local APPLICATION_CONFIGURED_DELAY=0
+    while [[ $APPLICATION_CONFIGURED_DELAY -lt $APPLICATION_CONFIGURED_TIMEOUT ]]; do
+        app_info=$(system application-list ${CLI_NOWRAP} | grep stx-openstack)
+        app_status=$(echo 
${app_info}| awk '{print $8}')
+
+        if [[ ${app_status} == "upload-failed" ]]; then
+            log "Application stx-openstack : upload failed"
+            return 1
+        fi
+
+        if [[ ${app_status} == *"uploading"* ]]; then
+            log "Waiting for application to become uploaded info: stx-openstack"
+            sleep 10
+            APPLICATION_CONFIGURED_DELAY=$((APPLICATION_CONFIGURED_DELAY + 10))
+        else
+            log "stx-openstack application: upload complete"
+            break
+        fi
+    done
+
+    if [[ APPLICATION_CONFIGURED_DELAY -eq APPLICATION_CONFIGURED_TIMEOUT ]]; then
+        echo "ERROR: timed out waiting for stx-openstack to become uploaded"
+        return 1
+    fi
+    return 0
+}
+
+#upload application (helm-charts) and apply application
+function setup_kube_pods {
+    if [[ "$K8S_ENABLED" != "yes" ]]; then
+        return 0
+    fi
+
+    if is_stage_complete "openstack_deployment"; then
+        info "Skipping Openstack app deployment configuration; already done"
+        return 0
+    fi
+
+    # Add DNS Cluster, might need to remove later
+    # DNS_EP=$(kubectl describe svc -n kube-system kube-dns | awk /IP:/'{print $2}')
+    # log_command "system dns-modify nameservers="$DNS_EP,8.8.8.8""
+    # RET=$?
+    #log_command "ceph osd pool ls | xargs -i ceph osd pool set {} size 1"
+
+    app_info=$(system application-list ${CLI_NOWRAP} | grep stx-openstack)
+    app_status=$(echo ${app_info}| awk '{print $8}')
+    echo "app status is: ${app_status}"
+    if [[ ${app_status} == "upload-failed" || ${app_status} == "apply-failed" ]]; then
+        log "Application upload/apply: configuration failed"
+        return 1
+    fi
+
+    if [[ ${app_status} == *"applied"* ]]; then
+        log "Application Already applied"
+        stage_complete "openstack_deployment"
+        return 0
+    fi
+
+    if [[ ${app_status} == *"applying"* ]]; then
+        wait_for_application_apply
+        RET=$? 
+ if [ ${RET} -ne 0 ]; then + exit ${RET} + else + echo "Openstack App is applied" + return 0 + fi + elif [[ ${app_status} == *"uploaded"* ]]; then + info "Applying stx-openstack application" + log_command "system application-apply stx-openstack" + wait_for_application_apply + RET=$? + if [ ${RET} -ne 0 ]; then + exit ${RET} + fi + else + info "Uploading stx-openstack application" + log_command "system application-upload stx-openstack helm-charts-manifest-no-tests.tgz" + wait_for_application_upload + RET=$? + if [ ${RET} -ne 0 ]; then + exit ${RET} + fi + info "Applying stx-openstack application" + log_command "system application-apply stx-openstack" + wait_for_application_apply + RET=$? + if [ ${RET} -ne 0 ]; then + exit ${RET} + fi + fi + + #log_command "ceph osd pool ls | xargs -i ceph osd pool set {} size 1" + stage_complete "openstack_deployment" + return 0 +} + +#wait for ceph monitor to be ready +function wait_for_ceph_mon { + local CEPH_MON_CONFIGURED_TIMEOUT=600 + local CEPH_MON_CONFIGURED_DELAY=0 + local NODE=$1 + while [[ $CEPH_MON_CONFIGURED_DELAY -lt $CEPH_MON_CONFIGURED_TIMEOUT ]]; do + ceph_info=$(system ceph-mon-list ${CLI_NOWRAP} | grep ${NODE}) + ceph_status=$(echo ${ceph_info}| awk '{print $8}') + + if [[ ${ceph_status} == *"configuring"* ]]; then + log "Waiting for ceph mon to become configured on ${NODE}" + sleep 10 + CEPH_MON_CONFIGURED_DELAY=$((CEPH_MON_CONFIGURED_DELAY + 10)) + else + log "ceph mon configured on ${NODE}" + break + fi + done + + if [[ CEPH_MON_CONFIGURED_DELAY -eq CEPH_MON_CONFIGURED_TIMEOUT ]]; then + echo "ERROR: timed out waiting for ceph mon to become configured" + return 1 + fi + return 0 +} + +#add ceph monitor for standard system +function add_ceph_mon { + local STORAGE_NODES=$(system host-list ${CLI_NOWRAP} | awk '{if ($6 == "storage") print $4}') + if [ ! 
-z "${STORAGE_NODES}" ]; then + return 0 + fi + if is_stage_complete "ceph-mon"; then + info "Skipping k8s cert configuration; already done" + return 0 + fi + local HOSTS=$(system host-list ${CLI_NOWRAP} | awk '{if ($6 == "worker") print $4}') + if [ ! -z "${HOSTS}" ]; then + log_command "system ceph-mon-add "${HOSTS[0]}"" + wait_for_ceph_mon ${HOSTS[0]} + echo "After unlocking worker nodes, lock controller-1" + echo "Add osd to controller-1, unlock" + echo "swact to controller-1, lock controller-0" + echo "Add osd to controller-0, unlock" + echo "swact back to controller-0, and run lab_setup.sh" + stage_complete "ceph-mon" + fi + + return 0 +} + +#install cert +function install_k8s_cert { + if [ "$K8S_ENABLED" != "yes" ]; then + return 0 + fi + if is_stage_complete "k8scert"; then + info "Skipping k8s cert configuration; already done" + return 0 + fi + + log_command "git archive --remote=git://147.11.178.22/users/rchurch/k8s/tools.git stx-containers2 ca-cert.pem | tar -x" + log_command "echo ${DEFAULT_OPENSTACK_PASSWORD} sudo -S cp ca-cert.pem /etc/pki/ca-trust/source/anchors/" + log_command "echo ${DEFAULT_OPENSTACK_PASSWORD} sudo -S update-ca-trust" + + stage_complete "k8scert" + return 0 +} + +#Add labels +function add_k8s_label { + local ALL_NODES=$(system host-list ${CLI_NOWRAP} | awk '{if (($6=="controller" || $6=="worker") && ($12 != "offline")) print $4;}') + for NODE in ${ALL_NODES}; do + if is_stage_complete "k8s_label" ${NODE}; then + info "Skipping k8s label configuration for ${NODE}; already done" + continue + fi + if [[ "${NODE}" == "controller"* ]]; then + info "Adding K8S label for ${NODE}" + log_command "system host-label-assign ${NODE} openstack-control-plane=enabled" + if [[ "${SMALL_SYSTEM}" == "yes" ]]; then + log_command "system host-label-assign ${NODE} openstack-worker-node=enabled" + log_command "system host-label-assign ${NODE} openvswitch=enabled" + log_command "system host-label-assign ${NODE} sriov=enabled" + fi + elif [[ "${NODE}" == 
"worker"* ]]; then + info "Adding K8S label for ${NODE}" + log_command "system host-label-assign ${NODE} openstack-worker-node=enabled" + log_command "system host-label-assign ${NODE} openvswitch=enabled" + log_command "system host-label-assign ${NODE} sriov=enabled" + fi + stage_complete "k8s_label" ${NODE} + done + return 0 +} + +# +# Https Changes + + +if [ -f "${STATUS_FILE}" ]; then + rm -f ${STATUS_FILE} +fi + + +log "============================================================================" +log "Starting lab setup (${CONFIG_FILE}): $(date)" +declare -p >> ${LOG_FILE} +log "============================================================================" + +echo "Checking for required files" +check_required_files +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to check required files, ret=${RET}" + exit ${RET} +fi + +if [ ! -z "${SYSTEM_NAME}" ]; then + set_system_name ${SYSTEM_NAME} + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to set system name, ret=${RET}" + exit ${RET} + fi +fi + +set_vswitch_type ${VSWITCH_TYPE} +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to set vswitch type, ret=${RET}" + exit ${RET} +fi + +# Check HTTPS configuration. +# 1. if the config file has HTTPS_ENABLED=true/false and the system +# https configuration is different than the config file value , +# we will modify the sytem https value to match with config file value. +# This will happen at the first stages of lab setup as the endpoints +# will be converted to https , and other service use these endpoints +# 2. If the config file has CERTIFICATE_FILENAME , +# parameter , we will copy the certificate using +# "system certificate-install" . This will be done as one +# of the last steps in lab_setup +setup_https_security_configuration +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to set HTTPS Security configuration , ret=${RET}" + exit ${RET} +fi + +setup_neutron_service_parameters +RET=$? 
+if [ ${RET} -ne 0 ]; then + echo "Failed to setup BGP service parameters" + return ${RET} +fi + +setup_sdn +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to setup SDN, ret=${RET}" + exit ${RET} +fi + +setup_sfc +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to setup SFC, ret=${RET}" + exit ${RET} +fi + +if [ "x${DISTRIBUTED_CLOUD_ROLE}" != "xsubcloud" ]; then + set_dns + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to set DNS configuration, ret=${RET}" + exit ${RET} + fi + + set_time_service + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to set $TIMESERVICE configuration, ret=${RET}" + exit ${RET} + fi +fi + +add_partitions +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to create partitions, ret=${RET}" + exit ${RET} +fi + +extend_cgts_vg +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to extend cgts-vg, ret=${RET}" + exit ${RET} +fi + +add_local_storage +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to add local storage, ret=${RET}" + exit ${RET} +fi + +# Early configuration of Ceph storage backend +if [ "${CONFIGURE_STORAGE_CEPH}" == "yes" -a "${WHEN_TO_CONFIG_CEPH}" == "early" ]; then + storage_backend_enable "ceph" + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to set Ceph storage backend configuration, ret=${RET}" + exit ${RET} + fi +fi + +# On secondary region, if cinder is shared from primary region, +# it will be added as a service to the external backend +if [[ ${CINDER_BACKENDS} =~ "external" ]]; then + storage_backend_enable "external" + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to set external storage backend configuration, ret=${RET}" + exit ${RET} + fi + # Because we create duplicate endpoints in the primary region, let's + # wait a bit more + sleep 20 +fi + +if [ "x${DISTRIBUTED_CLOUD_ROLE}" != "xcontroller" ]; then + add_provider_networks + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to add data networks, ret=${RET}" + exit ${RET} + fi +fi + +# Setup custom firewall rules if file is specified +if [ ! 
-z "${FIREWALL_RULES_FILE:-}" ]; then + setup_custom_firewall_rules +fi + +install_k8s_cert +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to add data interfaces, ret=${RET}" + exit ${RET} +fi + +add_k8s_label +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to add labels, ret=${RET}" + exit ${RET} +fi + + +if [ "x${SMALL_SYSTEM}" != "xyes" ]; then + FILE=${HOME}/.lab_setup.group${GROUPNO}.waitfornodes + if [ ! -f ${FILE} -a "x${PAUSE_CONFIG}" == "xyes" ]; then + touch ${FILE} + echo "" + echo "WARNING:" + echo "Stopping after last operation. Rerun this command after" + if [ "${CONFIGURE_STORAGE_CEPH}" == "yes" -a "${WHEN_TO_CONFIG_CEPH}" == "early" ]; then + echo "controller-1 plus all storage & worker nodes have become 'online' " + echo "after inital installation." + echo "" + else + echo "all worker nodes have become 'online' after initial" + echo "installation." + echo "" + fi + exit 0 + fi +fi + +if [ "x${DISTRIBUTED_CLOUD_ROLE}" != "xcontroller" ]; then + COUNT=$(echo ${NODES} | wc -w) + if [ ${COUNT} -eq 0 -a -z "${STORAGE_NODES}" -a -z "${CONTROLLER_NODES}" ]; then + echo "" + echo "ERROR: There are no storage, worker or controller+worker nodes online" + echo "" + exit 1 + fi +fi + +if [[ (("${VIRTUALNODES}" == "yes")) ]]; then + write_flag_file + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to write flags, ret=${RET}" + exit ${RET} + fi + + set_reserved_memory + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to set memory, ret=${RET}" + exit ${RET} + fi +fi + +add_interfaces +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to add data interfaces, ret=${RET}" + exit ${RET} +fi + +if [ "x${BM_ENABLED}" == "xyes" ]; then + add_board_management + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to board management configuration, ret=${RET}" + exit ${RET} + fi +fi + +source ${OPENRC} +# Setup the OSD if the system doesn't have separate ceph storage nodes +setup_osd_storage + + +setup_vswitch_cpus +RET=$? 
+if [ ${RET} -ne 0 ]; then + echo "Failed to setup vswitch CPU configuration, ret=${RET}" + exit ${RET} +fi + +add_data_addresses_and_routes + +RET=$? +if [ ${RET} -ne 0 ]; then + echo "Failed to add IP addresses and routes to data interfaces, ret=${RET}" + exit ${RET} +fi + +if [ -z "${AVAIL_CONTROLLER_NODES}" -a "${SYSTEM_MODE}" != "simplex" ]; then + if [ "${CONFIGURE_STORAGE_CEPH}" == "yes" -a -n "${CONTROLLER_NODES}" ]; then + if [ "x${PAUSE_CONFIG}" == "xyes" ]; then + echo "" + echo "WARNING:" + echo "Stopping after initial controller node setup. Rerun this command" + echo "after ${CONTROLLER_NODES} has been unlocked and is enabled." + echo "" + exit 0 + fi + fi +fi + +if [ "${CONFIGURE_STORAGE_CEPH}" == "yes" -a "${WHEN_TO_CONFIG_CEPH}" == "early" ]; then + if [ ! -z "${STORAGE_NODES}" ]; then + + setup_ceph_storage_tiers + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup additional ceph storage tiers, ret=${RET}" + exit ${RET} + fi + + setup_journal_storage + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup Journal storage, ret=${RET}" + exit ${RET} + fi + + setup_osd_storage + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup OSD storage, ret=${RET}" + exit ${RET} + fi + + setup_ceph_storage_tier_backends + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup additional ceph backends, ret=${RET}" + exit ${RET} + fi + + FILE=${HOME}/.lab_setup.group${GROUPNO}.storage + if [ ! -f ${FILE} -a "x${PAUSE_CONFIG}" == "xyes" ]; then + touch ${FILE} + echo "" + echo "WARNING:" + echo "Stopping after initial storage node setup. Rerun this command" + echo "after all storage nodes have been unlocked and are enabled, and" + echo "all worker nodes have become 'online' after initial installation." + echo "" + exit 0 + fi + fi +fi + +add_ceph_mon +RET=$? 
+if [ ${RET} -ne 0 ]; then + echo "Failed to add_ceph_mon, ret=${RET}" + exit ${RET} +fi + +if [ "x${DISTRIBUTED_CLOUD_ROLE}" != "xcontroller" ]; then + FILE=${HOME}/.lab_setup.group${GROUPNO}.interfaces + if [ ! -f ${FILE} -a "x${PAUSE_CONFIG}" == "xyes" ]; then + touch ${FILE} + COUNT=$(echo ${NODES} | wc -w) + if [ "x${SMALL_SYSTEM}" != "xyes" ]; then + echo "" + echo "WARNING:" + echo "Stopping after data interface setup. Rerun this command" + echo "after all worker nodes have been unlocked and are enabled." + echo "" + exit 0 + elif [ ${COUNT} -lt 2 ]; then + echo "" + echo "WARNING:" + echo "Stopping after data interface setup. Rerun this command" + echo "after controller-0 is unlocked and enabled" + echo "" + exit 0 + fi + + ## continue + fi +fi + +# Late configuration of Ceph storage backend +if [ "${CONFIGURE_STORAGE_CEPH}" == "yes" -a "${WHEN_TO_CONFIG_CEPH}" == "late" ]; then + ENABLED_CONTROLLER_NODES=$(system host-list ${CLI_NOWRAP} | awk '{if ($6=="controller" && ($10 == "enabled")) print $4;}') + + enabled_controller_nodes=( ${ENABLED_CONTROLLER_NODES} ) + + if [ "${#enabled_controller_nodes[@]}" -lt 2 ]; then + if [ "x${PAUSE_CONFIG}" == "xyes" ]; then + echo "" + echo "WARNING:" + echo "Stopping after initial controller node setup. Rerun this command" + echo "after both controller nodes have been unlocked and enabled." + echo "" + exit 0 + fi + fi + + #TODO: merge this and the early config in one function (to eliminate code duplication) + storage_backend_enable "ceph" + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to set Ceph storage backend configuration, ret=${RET}" + exit ${RET} + fi + + #TODO: Remove after sm supports in-service process restarts + FILE=${HOME}/.lab_setup.group${GROUPNO}.storage_backend_enable_ceph + if [ ! -f ${FILE} -a "x${PAUSE_CONFIG}" == "xyes" ]; then + touch ${FILE} + echo "" + echo "WARNING:" + echo "Stopping after Ceph backend provisioning. Rerun this command" + echo "after doing the following steps:" + echo " 1. 
lock and unlock controller-1" + echo " 2. swact to controller-1" + echo " 3. lock and unlock controller-0" + echo " 4. swact to contoller-0" + echo "" + exit 0 + fi + + storage_nodes=$(system host-list ${CLI_NOWRAP} | grep storage | awk '{if ($12 != "offline") {print $4;}}' | wc -l) + if [ ${storage_nodes} -lt 2 ]; then + echo "" + echo "WARNING:" + echo "Stopping after adding Ceph backend. Install at least two storage nodes " + echo "then rerun this command after they are online." + echo "" + exit 0 + fi + + setup_ceph_storage_tiers + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup additional Ceph storage tiers, ret=${RET}" + exit ${RET} + fi + + setup_journal_storage + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup Journal storage, ret=${RET}" + exit ${RET} + fi + + setup_osd_storage + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup OSD storage, ret=${RET}" + exit ${RET} + fi + + setup_ceph_storage_tier_backends + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup additional ceph backends, ret=${RET}" + exit ${RET} + fi + + FILE=${HOME}/.lab_setup.group${GROUPNO}.storage + if [ ! -f ${FILE} -a "x${PAUSE_CONFIG}" == "xyes" ]; then + touch ${FILE} + echo "" + echo "WARNING:" + echo "Stopping after initial storage node setup. Rerun this command" + echo "after all storage nodes have been unlocked and are enabled." + echo "" + exit 0 + fi +fi + +FILE=.no_openstack_install +if [ -f ${FILE} ]; then + echo "File to stop exsist, if you want to continue, remove .no_openstack_install file and run." + exit 0 +fi + +###Need to bring up the pods here for kubernetes: + +setup_kube_pods +RET=$? 
+if [ ${RET} -ne 0 ]; then + echo "Failed to apply application, ret=${RET}" + exit ${RET} +fi +if [ "$K8S_ENABLED" == "yes" ]; then + unset OS_AUTH_URL + export OS_AUTH_URL=${K8S_URL} + setup_providernet_tenants_quota_crentials + add_network_segment_ranges + +fi + +if [ "x${DISTRIBUTED_CLOUD_ROLE}" != "xcontroller" ]; then + + setup_internal_networks + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup infrastructure networks, ret=${RET}" + exit ${RET} + fi + + setup_management_networks + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup management networks, ret=${RET}" + exit ${RET} + fi + + setup_ixia_networks + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup ixia networks, ret=${RET}" + exit ${RET} + fi + + setup_tenant_networks + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup tenant networks, ret=${RET}" + exit ${RET} + fi + + if [ "x${FLOATING_IP}" == "xyes" ]; then + setup_floating_ips + RET=$? + if [ ${RET} -ne 0 ]; then + echo "Failed to setup floating IP addresses, ret=${RET}" + exit ${RET} + fi + fi +fi + + +source ${OPENRC} + +resize_controller_filesystem +RET=$? 
+if [ ${RET} -ne 0 ]; then + info "Failed to resize controller file systems, ret=${RET}" + exit ${RET} +fi + +log_command "system controllerfs-list" +log_command "drbd-overview" + +## return to admin context +source ${OPENRC} + + +echo "CONFIG_FILE=${CONFIG_FILE}" > ${STATUS_FILE} +echo "CONFIG_FILE=${CONFIG_FILE}" > ${GROUP_STATUS} +echo "Done" + +exit 0 diff --git a/deployment/virtualbox/pybox/configs/aio-sx/stx_config.ini_centos b/deployment/virtualbox/pybox/configs/aio-sx/stx_config.ini_centos new file mode 100644 index 00000000..b988955f --- /dev/null +++ b/deployment/virtualbox/pybox/configs/aio-sx/stx_config.ini_centos @@ -0,0 +1,19 @@ +[LOGICAL_INTERFACE_2] +LAG_INTERFACE=N +INTERFACE_MTU=1500 +INTERFACE_PORTS=enp0s3 + +[OAM_NETWORK] +IP_ADDRESS = 10.10.10.2 +CIDR=10.10.10.0/24 +GATEWAY=10.10.10.1 +LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 + +[AUTHENTICATION] +ADMIN_PASSWORD=Li69nux* + +[VERSION] +RELEASE = 19.01 + +[SYSTEM] +SYSTEM_MODE=simplex diff --git a/deployment/virtualbox/pybox/consts/__init__.py b/deployment/virtualbox/pybox/consts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/deployment/virtualbox/pybox/consts/config.ini b/deployment/virtualbox/pybox/consts/config.ini new file mode 100644 index 00000000..b5e9177f --- /dev/null +++ b/deployment/virtualbox/pybox/consts/config.ini @@ -0,0 +1,75 @@ +[General] +controllers=2 +workers=2 +storage=0 +aio=False +deletevms=False +useexistinglab=True +securityprofile=standard +lowlatency=False +install_mode=graphical + +[PhysicalTopology] + [ControllerCEPH] + memory=8192, + cpus=2, + disks=[80000] + + [ControllerLVM] + memory=8192 + cpus=2 + disks=[100000, 10000] + + [ControllerAIO] + memory=12288, + cpus=2 + disks=[24000, 40000], + + [Compute] + memory=4096 + cpus=3 + disks=[50000, 30000] + + [Storage] + memory=3072 + cpus=1 + disks=[50000, 10000], + +[NetworkTopology] + [Controller] + 1={'nic': 'hostonly', 'intnet': 'none', 'nictype': '82540EM', 'nicpromisc': 'deny', 'hostonlyadapter': 
'vboxnet0'} + 2={'nic': 'intnet', 'intnet': 'intnet-management', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'} + 3={'nic': 'intnet', 'intnet': 'intnet-infra', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'} + + [Compute] + 1={'nic': 'intnet', 'intnet': 'intnet-unused', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'} + 2={'nic': 'intnet', 'intnet': 'intnet-management', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'} + 3={'nic': 'intnet', 'intnet': 'intnet-data1', 'nictype': 'virtio', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'} + 4={'nic': 'intnet', 'intnet': 'intnet-data2', 'nictype': 'virtio', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'} + + [Storage] + 1={'nic': 'internal', 'intnet': 'intnet-unused', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'} + 2={'nic': 'internal', 'intnet': 'intnet-management', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'} + 3={'nic': 'internal', 'intnet': 'intnet-infra', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'} + +[OAMHostOnlyNetwork] + ip=10.10.10.254 + netmask=255.255.255.0 + +[Lab] +name=vbox +floating_ip=10.10.10.7 +controller-0_ip=10.10.10.8 +controller-1_ip=10.10.10.9 +username=wrsroot +password=Li69nux* + +[Serial] +uartbase=0x3F8 +uartport=4 +uartmode=server +uartpath=/tmp + +[ISO] +isohost=localhost +isopath=/tmp/bootimage.iso diff --git a/deployment/virtualbox/pybox/consts/env.py b/deployment/virtualbox/pybox/consts/env.py new file mode 100644 index 00000000..c946e547 --- /dev/null +++ b/deployment/virtualbox/pybox/consts/env.py @@ -0,0 +1,27 @@ +#!/usr/bin/python3 +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import getpass +from sys import platform +import os + +user = getpass.getuser() + +if platform == 'win32' or platform == 'win64': + LOGPATH = 'C:\\Temp\\pybox_logs' + PORT = 10000 +else: + homedir 
= os.environ['HOME'] + LOGPATH = '{}/vbox_installer_logs'.format(homedir) + +class Lab: + VBOX = { + 'floating_ip': '10.10.10.7', + 'controller-0_ip': '10.10.10.8', + 'controller-1_ip': '10.10.10.9', + 'username': 'wrsroot', + 'password': 'Li69nux*', + } diff --git a/deployment/virtualbox/pybox/consts/networking.py b/deployment/virtualbox/pybox/consts/networking.py new file mode 100644 index 00000000..1c70bcce --- /dev/null +++ b/deployment/virtualbox/pybox/consts/networking.py @@ -0,0 +1,78 @@ +#!/usr/bin/python3 +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from sys import platform + + +class Subnets: + IPV4 = { + 'mgmt_subnet': '192.168.204.0/24', + 'infra_subnet': '192.168.205.0/24', + 'oam_subnet': '10.10.10.0/24' + } + + IPV6 = { + 'mgmt_subnet': 'aefd::/64', + 'infra_subnet': 'aced::/64', + 'oam_subnet': 'abcd::/64' + } + + +class NICs: + if platform == 'win32' or platform == 'win64': + + CONTROLLER = { + 'node_type': 'controller', + '1': {'nic': 'hostonly', 'intnet': '', 'nictype': '82540EM', 'nicpromisc': 'deny', 'hostonlyadapter': 'VirtualBox Host-Only Ethernet Adapter'}, + '2': {'nic': 'intnet', 'intnet': 'intnet-management', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + '3': {'nic': 'intnet', 'intnet': 'intnet-data1', 'nictype': 'virtio', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + '4': {'nic': 'intnet', 'intnet': 'intnet-data2', 'nictype': 'virtio', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + } + else: + CONTROLLER = { + 'node_type': 'controller', + '1': {'nic': 'hostonly', 'intnet': '', 'nictype': '82540EM', 'nicpromisc': 'deny', 'hostonlyadapter': 'vboxnet0'}, + '2': {'nic': 'intnet', 'intnet': 'intnet-management', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + '3': {'nic': 'intnet', 'intnet': 'intnet-data1', 'nictype': 'virtio', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + '4': {'nic': 'intnet', 'intnet': 'intnet-data2', 'nictype': 'virtio', 
'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + } + + COMPUTE = { + 'node_type': 'compute', + '1': {'nic': 'intnet', 'intnet': 'intnet-unused1', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + '2': {'nic': 'intnet', 'intnet': 'intnet-management', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + '3': {'nic': 'intnet', 'intnet': 'intnet-data1', 'nictype': 'virtio', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + '4': {'nic': 'intnet', 'intnet': 'intnet-data2', 'nictype': 'virtio', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + } + + STORAGE = { + 'node_type': 'storage', + '1': {'nic': 'intnet', 'intnet': 'intnet-unused', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + '2': {'nic': 'intnet', 'intnet': 'intnet-management', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + '3': {'nic': 'intnet', 'intnet': 'intnet-infra', 'nictype': '82540EM', 'nicpromisc': 'allow-all', 'hostonlyadapter': 'none'}, + } + +class OAM: + OAM = { + 'ip': '10.10.10.254', + 'netmask': '255.255.255.0', + } + +class Serial: + if platform == 'win32' or platform == 'win64': + SERIAL = { + 'uartbase': '0x3F8', + 'uartport': '4', + 'uartmode': 'tcpserver', + 'uartpath': '10000' + } + else: + SERIAL = { + 'uartbase': '0x3F8', + 'uartport': '4', + 'uartmode': 'server', + 'uartpath': '/tmp/' + } diff --git a/deployment/virtualbox/pybox/consts/node.py b/deployment/virtualbox/pybox/consts/node.py new file mode 100644 index 00000000..a53f97c4 --- /dev/null +++ b/deployment/virtualbox/pybox/consts/node.py @@ -0,0 +1,79 @@ +#!/usr/bin/python3 +# +# SPDX-License-Identifier: Apache-2.0 +# + + +class Nodes: + CONTROLLER_CEPH = { + 'node_type': 'controller-STORAGE', + 'memory': 12288, + 'cpus': 5, + 'disks': { + 1:[240000], + 2:[240000, 10000], + 3:[240000, 10000, 10000], + 4:[240000, 10000, 10000, 10000], + 5:[240000, 10000, 10000, 10000, 10000], + 6:[240000, 
10000, 10000, 10000, 10000, 10000], + 7:[240000, 10000, 10000, 10000, 10000, 10000, 10000] + } + } + + CONTROLLER_LVM = { + 'node_type': 'controller-STANDARD', + 'memory': 12288, + 'cpus': 5, + 'disks': { + 1:[240000], + 2:[240000, 10000], + 3:[240000, 10000, 10000], + 4:[240000, 10000, 10000, 10000], + 5:[240000, 10000, 10000, 10000, 10000], + 6:[240000, 10000, 10000, 10000, 10000, 10000], + 7:[240000, 10000, 10000, 10000, 10000, 10000, 10000] + } + } + + CONTROLLER_AIO = { + 'node_type': 'controller-AIO', + 'memory': 20000, + 'cpus': 8, + 'disks': { + 1:[240000], + 2:[240000, 10000], + 3:[240000, 10000, 10000], + 4:[240000, 10000, 10000, 10000], + 5:[240000, 10000, 10000, 10000, 10000], + 6:[240000, 10000, 10000, 10000, 10000, 10000], + 7:[240000, 10000, 10000, 10000, 10000, 10000, 10000] + } + } + + COMPUTE = { + 'node_type': 'compute', + 'memory': 4096, + 'cpus': 3, + 'disks': { + 1:[50000], + 2:[50000, 10000], + 3:[50000, 10000, 10000], + 4:[50000, 10000, 10000, 10000], + 5:[50000, 10000, 10000, 10000, 10000], + 6:[50000, 10000, 10000, 10000, 10000, 10000], + 7:[50000, 10000, 10000, 10000, 10000, 10000, 10000] + } + } + + STORAGE = { + 'node_type': 'storage', + 'memory': 3072, + 'cpus': 3, + 'disks': { + 1:[50000], + 2:[50000, 10000], + 3:[50000, 10000, 10000], + 4:[50000, 10000, 10000, 10000], + 5:[50000, 10000, 10000, 10000, 10000] + } + } diff --git a/deployment/virtualbox/pybox/consts/timeout.py b/deployment/virtualbox/pybox/consts/timeout.py new file mode 100644 index 00000000..61e4dcca --- /dev/null +++ b/deployment/virtualbox/pybox/consts/timeout.py @@ -0,0 +1,15 @@ +#!/usr/bin/python3 +# +# SPDX-License-Identifier: Apache-2.0 +# + + +class HostTimeout: + CONTROLLER_UNLOCK = 3600+1800 + REBOOT = 900 + INSTALL = 3600 + LAB_INSTALL = 3600 + HOST_INSTALL = 3600 + LAB_CONFIG = 5400 + INSTALL_PATCHES = 900 + NETWORKING_OPERATIONAL = 60 diff --git a/deployment/virtualbox/pybox/helper/__init__.py b/deployment/virtualbox/pybox/helper/__init__.py new file mode 
100644 index 00000000..e69de29b diff --git a/deployment/virtualbox/pybox/helper/host_helper.py b/deployment/virtualbox/pybox/helper/host_helper.py new file mode 100644 index 00000000..ae4540e7 --- /dev/null +++ b/deployment/virtualbox/pybox/helper/host_helper.py @@ -0,0 +1,160 @@ +#!/usr/bin/python3 +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import time +import streamexpect +from consts.timeout import HostTimeout +from utils import serial +from utils.install_log import LOG + + +def unlock_host(stream, hostname): + """ + Unlocks given host + Args: + stream(stream): Stream to active controller + hostname(str): Name of host to unlock + Steps: + - Check that host is locked + - Unlock host + """ + LOG.info("#### Unlock %s", hostname) + serial.send_bytes(stream, "system host-list | grep {}".format(hostname), expect_prompt=False) + try: + serial.expect_bytes(stream, "locked") + except streamexpect.ExpectTimeout: + LOG.info("Host %s not locked", hostname) + return 1 + serial.send_bytes(stream, "system host-unlock {}".format(hostname), expect_prompt=False) + LOG.info("Unlocking %s", hostname) + + +def lock_host(stream, hostname): + """ + Locks the specified host. 
+ Args: + stream(stream): Stream to controller-0 + hostname(str): Name of host to lock + Steps: + - Check that host is unlocked + - Lock host + """ + LOG.info("Lock %s", hostname) + serial.send_bytes(stream, "system host-list |grep {}".format(hostname), expect_prompt=False) + try: + serial.expect_bytes(stream, "unlocked") + except streamexpect.ExpectTimeout: + LOG.info("Host %s not unlocked", hostname) + return 1 + serial.send_bytes(stream, "system host-lock {}".format(hostname), expect_prompt="keystone") + LOG.info("Locking %s", hostname) + + +def reboot_host(stream, hostname): + """ + Reboots host specified + Args: + stream(): + hostname(str): Host to reboot + """ + LOG.info("Rebooting %s", hostname) + serial.send_bytes(stream, "system host-reboot {}".format(hostname), expect_prompt=False) + serial.expect_bytes(stream, "rebooting", HostTimeout.REBOOT) + + +def install_host(stream, hostname, host_type, host_id): + """ + Initiates install of specified host. Requires controller-0 to be installed already. + Args: + stream(stream): Stream to cont0 + hostname(str): Name of host + host_type(str): Type of host being installed e.g. 'storage' or 'compute' + host_id(int): id to identify host + """ + + time.sleep(10) + LOG.info("Installing %s with id %s", hostname, host_id) + if host_type is 'controller': + serial.send_bytes(stream, + "system host-update {} personality=controller".format(host_id), + expect_prompt=False) + elif host_type is 'storage': + serial.send_bytes(stream, + "system host-update {} personality=storage".format(host_id), + expect_prompt=False) + else: + serial.send_bytes(stream, + "system host-update {} personality=compute hostname={}".format(host_id, + hostname), + expect_prompt=False) + time.sleep(30) + + +def disable_logout(stream): + """ + Disables automatic logout of users. 
+ Args: + stream(stream): stream to cont0 + """ + LOG.info('Disabling automatic logout') + serial.send_bytes(stream, "export TMOUT=0") + + +def change_password(stream, username="wrsroot", password="Li69nux*"): + """ + changes the default password on initial login. + Args: + stream(stream): stream to cont0 + + """ + LOG.info('Changing password to Li69nux*') + serial.send_bytes(stream, username, expect_prompt=False) + serial.expect_bytes(stream, "Password:") + serial.send_bytes(stream, username, expect_prompt=False) + serial.expect_bytes(stream, "UNIX password:") + serial.send_bytes(stream, username, expect_prompt=False) + serial.expect_bytes(stream, "New password:") + serial.send_bytes(stream, password, expect_prompt=False) + serial.expect_bytes(stream, "Retype new") + serial.send_bytes(stream, password) + + +def login(stream, timeout=600, username="wrsroot", password="Li69nux*"): + """ + Logs into controller-0. + Args: + stream(stream): stream to cont0 + timeout(int): Time before login fails in seconds. + """ + + serial.send_bytes(stream, "\n", expect_prompt=False) + rc = serial.expect_bytes(stream, "ogin:", fail_ok=True, timeout=timeout) + if rc != 0: + serial.send_bytes(stream, "\n", expect_prompt=False) + if serial.expect_bytes(stream, "~$", timeout=10, fail_ok=True) == -1: + serial.send_bytes(stream, '\n', expect_prompt=False) + serial.expect_bytes(stream, "keystone", timeout=10) + else: + serial.send_bytes(stream, username, expect_prompt=False) + serial.expect_bytes(stream, "assword:") + serial.send_bytes(stream, password) + disable_logout(stream) + + +def logout(stream): + """ + Logs out of controller-0. 
+ Args: + stream(stream): stream to cont0 + """ + serial.send_bytes(stream, "exit", expect_prompt=False) + time.sleep(5) + + +def check_password(stream, password="Li69nux*"): + ret = serial.expect_bytes(stream, 'assword', fail_ok=True, timeout=5) + if ret == 0: + serial.send_bytes(stream, password, expect_prompt=False) diff --git a/deployment/virtualbox/pybox/helper/install_lab.py b/deployment/virtualbox/pybox/helper/install_lab.py new file mode 100644 index 00000000..9c1786bf --- /dev/null +++ b/deployment/virtualbox/pybox/helper/install_lab.py @@ -0,0 +1,53 @@ +#!/usr/bin/python3 +# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +Contains helper functions that will configure basic system settings. +""" + +from consts.timeout import HostTimeout + +from helper import host_helper + +from utils import serial +from utils.install_log import LOG + +def update_platform_cpus(stream, hostname, cpu_num=5): + """ + Update platform CPU allocation. + """ + + LOG.info("Allocating %s CPUs for use by the %s platform.", cpu_num, hostname) + serial.send_bytes(stream, "\nsource /etc/platform/openrc; system host-cpu-modify " + "{} -f platform -p0 {}".format(hostname, cpu_num, + prompt='keystone', timeout=300)) + +def set_dns(stream, dns_ip): + """ + Perform DNS configuration on the system. + """ + + LOG.info("Configuring DNS to %s.", dns_ip) + serial.send_bytes(stream, "source /etc/nova/openrc; system dns-modify " + "nameservers={}".format(dns_ip), prompt='keystone') + + +def config_controller(stream, config_file=None, password='Li69nux*'): + """ + Configure controller-0 using optional arguments + """ + + args = '' + if config_file: + args += '--config-file ' + config_file + ' ' + + serial.send_bytes(stream, "sudo config_controller {}".format(args), expect_prompt=False) + host_helper.check_password(stream, password=password) + ret = serial.expect_bytes(stream, "unlock controller to proceed.", + timeout=HostTimeout.LAB_CONFIG) + if ret != 0: + LOG.info("Configuration failed. 
def get_all_vms(labname, option="vms"):
    """
    Return the names (str) of the lab's node VMs, filtered by VirtualBox group.

    Args:
        labname: lab name; VMs must contain it and live in group "/<labname>".
        option: 'vms' for all registered VMs, 'runningvms' for running ones.

    Returns:
        list of VM names (decoded str) that belong to this lab.

    FIXES: removed a no-op `labname.encode('utf-8')` statement and hoisted
    the repeated encode out of the loop. Also added b'worker-' to the node
    markers: create_lab() names worker VMs "<lab>-worker-N", which the
    original filter (controller/compute/storage only) never matched —
    TODO confirm 'compute-' is still needed for older labs.
    """
    lab_bytes = labname.encode('utf-8')
    node_markers = (b'controller-', b'worker-', b'compute-', b'storage-')

    # Reduce the number of VMs we query
    initial_node_list = []
    for item in vboxmanage_list(option):
        if lab_bytes in item and any(marker in item for marker in node_markers):
            initial_node_list.append(item.decode('utf-8'))

    # Filter by group
    node_list = []
    group = bytearray('"/{}"'.format(labname), 'utf-8')
    for item in initial_node_list:
        for line in vboxmanage_showinfo(item).splitlines():
            try:
                key, value = line.split(b'=')
            except ValueError:
                # lines without a '=' are not key/value pairs; skip them
                continue
            if key == b'groups' and value == group:
                node_list.append(item)

    return node_list
def vboxmanage_showinfo(host):
    """
    Return the machine-readable `vboxmanage showvminfo` output for `host`.

    Args:
        host: VM name as str or bytes.

    Returns:
        raw bytes output of the command.

    BUG FIX: the original did `host.decode('utf-8')` without binding the
    result, so a bytes `host` was passed to subprocess unchanged.
    """
    if not isinstance(host, str):
        host = host.decode('utf-8')
    result = subprocess.check_output(['vboxmanage', 'showvminfo', host, '--machinereadable'],
                                     stderr=subprocess.STDOUT)
    return result
def vboxmanage_deletevms(hosts=None):
    """
    Delete a list of VMs and any disk mediums they leave behind.

    Args:
        hosts: non-empty list of VM names to unregister and delete.

    Raises:
        AssertionError: if hosts is empty, or a VM is still registered after.

    FIXES: dropped the redundant `len(hosts) != 0` guard (the assert already
    rejects empty input and the loop is a no-op otherwise), removed the
    unused `result` binding, and fixed the missing space in the assert
    message ("unexpectedlypresent").
    """
    assert hosts, "A list of hostname(s) is required"

    for hostname in hosts:
        LOG.info("Deleting VM %s", hostname)
        subprocess.check_output(['vboxmanage', 'unregistervm', hostname, '--delete'],
                                stderr=subprocess.STDOUT)
        time.sleep(10)
        # in case medium is still present after delete
        vboxmanage_deletemedium(hostname)

    vms_list = vboxmanage_list("vms")
    for item in hosts:
        assert item not in vms_list, "The following vms are unexpectedly " \
            "present {}".format(vms_list)
+ + """ + assert name, "Must provide network name" + LOG.info("Removing Host-only Network") + result = subprocess.check_output(['vboxmanage', 'hostonlyif', 'remove', name], + stderr=subprocess.STDOUT) + + +def vboxmanage_modifyvm(hostname=None, cpus=None, memory=None, nic=None, + nictype=None, nicpromisc=None, nicnum=None, + intnet=None, hostonlyadapter=None, + natnetwork=None, uartbase=None, uartport=None, + uartmode=None, uartpath=None, nicbootprio2=1, prefix=""): + """ + This modifies a VM with a specified name. + """ + + assert hostname, "Hostname is required" + # Add more semantic checks + cmd = ['vboxmanage', 'modifyvm', hostname] + if cpus: + cmd.extend(['--cpus', cpus]) + if memory: + cmd.extend(['--memory', memory]) + if nic and nictype and nicpromisc and nicnum: + cmd.extend(['--nic{}'.format(nicnum), nic]) + cmd.extend(['--nictype{}'.format(nicnum), nictype]) + cmd.extend(['--nicpromisc{}'.format(nicnum), nicpromisc]) + if intnet: + if prefix: + intnet = "{}-{}".format(prefix, intnet) + else: + intnet = "{}".format(intnet) + cmd.extend(['--intnet{}'.format(nicnum), intnet]) + if hostonlyadapter: + cmd.extend(['--hostonlyadapter{}'.format(nicnum), hostonlyadapter]) + if natnetwork: + cmd.extend(['--nat-network{}'.format(nicnum), natnetwork]) + elif nicnum and nictype == 'nat': + cmd.extend(['--nic{}'.format(nicnum), 'nat']) + if uartbase and uartport and uartmode and uartpath: + cmd.extend(['--uart1']) + cmd.extend(['{}'.format(uartbase)]) + cmd.extend(['{}'.format(uartport)]) + cmd.extend(['--uartmode1']) + cmd.extend(['{}'.format(uartmode)]) + if platform == 'win32' or platform == 'win64': + cmd.extend(['{}'.format(env.PORT)]) + env.PORT += 1 + else: + if prefix: + prefix = "{}_".format(prefix) + if 'controller-0' in hostname: + cmd.extend(['{}{}{}_serial'.format(uartpath, prefix, hostname)]) + else: + cmd.extend(['{}{}{}'.format(uartpath, prefix, hostname)]) + if nicbootprio2: + cmd.extend(['--nicbootprio2']) + cmd.extend(['{}'.format(nicbootprio2)]) + 
def vboxmanage_port_forward(hostname, network, local_port, guest_port, guest_ip):
    """
    Replace the SSH port-forwarding rule for `hostname` on a NAT network.

    Deletes any previous rule named "<hostname>-<guest_port>" (ignoring
    failures, since the rule may not exist) and then installs a fresh
    TCP forward from `local_port` on the host to `guest_ip:guest_port`.
    """
    # VBoxManage natnetwork modify --netname natnet1 --port-forward-4
    # "ssh:tcp:[]:1022:[192.168.15.5]:22"
    rule_name = "{}-{}".format(hostname, guest_port)
    base_cmd = ['vboxmanage', 'natnetwork', 'modify',
                '--netname', network, '--port-forward-4']

    # Delete previous entry, if any
    LOG.info("Removing previous forwarding rule '%s' from NAT network '%s'", rule_name, network)
    try:
        subprocess.check_output(base_cmd + ['delete', rule_name],
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        pass

    # Add new rule
    rule = "{}:tcp:[]:{}:[{}]:{}".format(rule_name, local_port, guest_ip, guest_port)
    LOG.info("Updating port-forwarding rule to: %s", rule)
    subprocess.check_output(base_cmd + [rule], stderr=subprocess.STDOUT)
def vboxmanage_deletemedium(hostname, vbox_home_dir='/home'):
    """
    Disconnect and delete every disk medium belonging to `hostname`.

    Looks for files containing the hostname under
    <vbox_home_dir>/<user>/vbox_disks/, closes them in VirtualBox and
    removes the backing file. Failures are logged and skipped, since the
    disks may already be gone. No-op on Windows.

    BUG FIX: the bare `except: pass` around os.remove could swallow any
    exception (including KeyboardInterrupt); narrowed to OSError, which is
    what a missing/locked file actually raises.
    """
    assert hostname, "Hostname is required"

    if platform == 'win32' or platform == 'win64':
        return

    username = getpass.getuser()
    vbox_home_dir = "{}/{}/vbox_disks/".format(vbox_home_dir, username)

    disk_list = [f for f in os.listdir(vbox_home_dir) if
                 os.path.isfile(os.path.join(vbox_home_dir, f)) and hostname in f]
    LOG.info("Disk mediums to delete: %s", disk_list)
    for disk in disk_list:
        LOG.info("Disconnecting disk %s from vbox.", disk)
        try:
            result = subprocess.check_output(['vboxmanage', 'closemedium', 'disk',
                                              "{}{}".format(vbox_home_dir, disk), '--delete'],
                                             stderr=subprocess.STDOUT)
            LOG.info(result)
        except subprocess.CalledProcessError as e:
            # Continue if failures, disk may not be present
            LOG.info("Error disconnecting disk, continuing. "
                     "Details: stdout: %s stderr: %s", e.stdout, e.stderr)
        LOG.info("Removing backing file %s", disk)
        try:
            os.remove("{}{}".format(vbox_home_dir, disk))
        except OSError:
            # best-effort cleanup: backing file may already be deleted
            pass
def vboxmanage_startvm(hostname=None, force=False):
    """
    Power on a VM and wait (up to ~20s) until VirtualBox reports it running.

    Args:
        hostname: VM name to start.
        force: skip the "already running" check and start unconditionally.

    Raises:
        RuntimeError: if the VM does not show up in `runningvms` in time.

    BUG FIX: the original did `raise "Failed to start VM: ..."` — raising a
    str is a TypeError in Python 3; replaced with RuntimeError.
    """
    assert hostname, "Hostname is required"

    if not force:
        LOG.info("Check if VM is running")
        running_vms = vboxmanage_list(option="runningvms")
    else:
        running_vms = []

    if hostname.encode('utf-8') in running_vms:
        LOG.info("Host %s is already started", hostname)
    else:
        LOG.info("Powering on VM %s", hostname)
        result = subprocess.check_output(['vboxmanage', 'startvm',
                                          hostname], stderr=subprocess.STDOUT)
        LOG.info(result)

        # Wait for VM to start
        tmout = 20
        while tmout:
            tmout -= 1
            running_vms = vboxmanage_list(option="runningvms")
            if hostname.encode('utf-8') in running_vms:
                break
            time.sleep(1)
        else:
            raise RuntimeError("Failed to start VM: {}".format(hostname))
        LOG.info("VM '%s' started.", hostname)
+ """ + + assert host, "Hostname is required" + assert name, "Need to provide the snapshot to restore" + + LOG.info("Restoring snapshot %s on VM %s", name, host) + result = subprocess.call(["vboxmanage", "snapshot", host, "restore", + name], stderr=subprocess.STDOUT) + time.sleep(10) + diff --git a/deployment/virtualbox/pybox/install_vbox.py b/deployment/virtualbox/pybox/install_vbox.py new file mode 100755 index 00000000..8f69dff2 --- /dev/null +++ b/deployment/virtualbox/pybox/install_vbox.py @@ -0,0 +1,1608 @@ +#!/usr/bin/python3 +# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +This tool is an automated installer to allow users to easily install +StarlingX on VirtualBox. +""" + +import subprocess +import configparser +import getpass +import time +import re +import tempfile +import signal +import sys +from sys import platform +import paramiko +import streamexpect + +from utils import kpi, serial +from utils.install_log import init_logging, get_log_dir, LOG +from utils.sftp import sftp_send, send_dir + +from helper import vboxmanage +from helper import install_lab +from helper import host_helper + +from consts import env +from consts.node import Nodes +from consts.networking import NICs, OAM, Serial +from consts.timeout import HostTimeout + +from Parser import handle_args + + +# Global vars +vboxoptions = None + + +def menu_selector(stream, setup_type, + securityprofile, lowlatency, install_mode='serial'): + """ + Select the correct install option. 
def setup_networking(stream, ctrlr0_ip, gateway_ip, password='Li69nux*'):
    """
    Setup initial networking on controller-0 so we can transfer files.

    Assigns `ctrlr0_ip`/24 to enp0s3, brings the link up, adds the default
    gateway, and (for hostonly networks) pings the controller until it
    responds. Skipped entirely if the interface already answers.

    Raises:
        RuntimeError: if the controller cannot be pinged in time.

    BUG FIX: the failure path used `raise "Failed ... {}s ... {}!"` — a raised
    str is a TypeError, and the `{}` placeholders were never formatted.
    """
    ip = ctrlr0_ip
    interface = "enp0s3"
    ret = serial.send_bytes(
        stream,
        "/sbin/ip address list",
        prompt=ctrlr0_ip,
        fail_ok=True,
        timeout=10)
    if ret != 0:
        LOG.info("Setting networking up.")
    else:
        LOG.info("Skipping networking setup")
        return
    LOG.info("%s being set up with ip %s", interface, ip)
    serial.send_bytes(stream,
                      "sudo /sbin/ip addr add {}/24 dev {}".format(ip, interface),
                      expect_prompt=False)
    host_helper.check_password(stream, password=password)
    time.sleep(2)
    serial.send_bytes(stream,
                      "sudo /sbin/ip link set {} up".format(interface),
                      expect_prompt=False)
    host_helper.check_password(stream, password=password)
    time.sleep(2)
    serial.send_bytes(stream,
                      "sudo route add default gw {}".format(gateway_ip),
                      expect_prompt=False)
    host_helper.check_password(stream, password=password)

    if vboxoptions.vboxnet_type == 'hostonly':
        LOG.info("Pinging controller-0 at: %s...", ip)
        tmout = HostTimeout.NETWORKING_OPERATIONAL
        while tmout:
            # Ping from machine hosting virtual box to virtual machine
            rc = subprocess.call(['ping', '-c', '1', ip])
            if rc == 0:
                break
            tmout -= 1
        else:
            raise RuntimeError(
                "Failed to establish connection in {}s "
                "to controller-0 at: {}!".format(
                    HostTimeout.NETWORKING_OPERATIONAL, ip))
        LOG.info("Ping succeeded!")
def install_controller_0(cont0_stream, setup_type, securityprofile, lowlatency,
                         install_mode, ctrlr0_ip, gateway_ip,
                         username='wrsroot', password='Li69nux*'):
    """
    Installation of controller-0: drive the boot menu, wait for the login
    prompt, change the initial password, disable logout, and set up basic
    networking.

    BUG FIX: if the first wait for "login:" failed *after* the INSTALL
    timeout budget was exhausted, the original fell through silently and
    the function continued as if the install had succeeded; it now
    re-raises in that case.
    """
    LOG.info("Starting installation of controller-0")
    start_time = time.time()
    menu_selector(
        cont0_stream,
        setup_type,
        securityprofile,
        lowlatency,
        install_mode)

    try:
        serial.expect_bytes(
            cont0_stream,
            "login:",
            timeout=HostTimeout.INSTALL)
    except Exception as e:
        LOG.info("Connection failed for controller-0 with %s", e)
        # Sometimes we get UnicodeDecodeError exception due to the output
        # of installation. So try one more time maybe
        LOG.info("So ignore the exception and wait for controller-0 to be installed again.")
        elapsed = time.time() - start_time
        if HostTimeout.INSTALL > elapsed:
            serial.expect_bytes(
                cont0_stream,
                "login:",
                timeout=HostTimeout.INSTALL - elapsed)
        else:
            # No time budget left to retry; surface the original failure.
            raise

    LOG.info("Completed installation of controller-0.")
    # Change password on initial login
    time.sleep(20)
    host_helper.change_password(
        cont0_stream,
        username=username,
        password=password)
    # Disable user logout
    time.sleep(10)
    host_helper.disable_logout(cont0_stream)
    # Setup basic networking
    time.sleep(1)
    setup_networking(cont0_stream, ctrlr0_ip, gateway_ip, password=password)
def get_disk_sizes(comma_list):
    """
    Validate and return the disk sizes as taken from the command line.

    Args:
        comma_list: comma separated sizes, e.g. "240,10,20".

    Returns:
        list of the size strings (unchanged, so callers can str() them).

    Raises:
        Exception: if any entry is not a non-negative integer.

    FIXES: replaced the bare `except` (which also caught the Exception it
    had just raised) and the duplicated log/raise message with a single
    explicit validation path. External behavior is unchanged.
    """
    sizes = comma_list.split(',')
    for size in sizes:
        try:
            # NOTE: 0 is accepted, matching the original `< 0` check.
            valid = int(size) >= 0
        except ValueError:
            valid = False
        if not valid:
            msg = "Disk sizes must be a comma separated list of positive integers."
            LOG.info(msg)
            raise Exception(msg)
    return sizes
+ """ + + # Pull in node configuration + node_config = [getattr(Nodes, attr) + for attr in dir(Nodes) if not attr.startswith('__')] + nic_config = [getattr(NICs, attr) + for attr in dir(NICs) if not attr.startswith('__')] + oam_config = [getattr(OAM, attr) + for attr in dir(OAM) if not attr.startswith('__')][0] + serial_config = [getattr(Serial, attr) + for attr in dir(Serial) if not attr.startswith('__')] + + # Create nodes list + nodes_list = [] + + if vboxoptions.controllers: + for node_id in range(0, vboxoptions.controllers): + node_name = vboxoptions.labname + "-controller-{}".format(node_id) + nodes_list.append(node_name) + if vboxoptions.workers: + for node_id in range(0, vboxoptions.workers): + node_name = vboxoptions.labname + "-worker-{}".format(node_id) + nodes_list.append(node_name) + if vboxoptions.storages: + for node_id in range(0, vboxoptions.storages): + node_name = vboxoptions.labname + "-storage-{}".format(node_id) + nodes_list.append(node_name) + + LOG.info("#### We will create the following nodes: %s", nodes_list) + port = 10000 + for node in nodes_list: + LOG.info("#### Creating node: %s", node) + vboxmanage.vboxmanage_createvm(node, vboxoptions.labname) + vboxmanage.vboxmanage_storagectl( + node, + storectl="sata", + hostiocache=vboxoptions.hostiocache) + disk_sizes = None + no_disks = 0 + if "controller" in node: + if vboxoptions.setup_type in [AIO_DX, AIO_SX]: + node_type = "controller-AIO" + else: + node_type = "controller-{}".format(vboxoptions.setup_type) + if vboxoptions.controller_disk_sizes: + disk_sizes = get_disk_sizes(vboxoptions.controller_disk_sizes) + else: + no_disks = vboxoptions.controller_disks + elif "worker" in node: + node_type = "worker" + if vboxoptions.worker_disk_sizes: + disk_sizes = get_disk_sizes(vboxoptions.worker_disk_sizes) + else: + no_disks = vboxoptions.worker_disks + elif "storage" in node: + node_type = "storage" + if vboxoptions.storage_disk_sizes: + disk_sizes = 
get_disk_sizes(vboxoptions.storage_disk_sizes) + else: + no_disks = vboxoptions.storage_disks + for item in node_config: + if item['node_type'] == node_type: + vboxmanage.vboxmanage_modifyvm( + node, + cpus=str(item['cpus']), + memory=str(item['memory'])) + if not disk_sizes: + disk_sizes = item['disks'][no_disks] + vboxmanage.vboxmanage_createmedium(node, disk_sizes, + vbox_home_dir=vboxoptions.vbox_home_dir) + if platform == 'win32' or platform == 'win64': + vboxmanage.vboxmanage_modifyvm( + node, uartbase=serial_config[0]['uartbase'], + uartport=serial_config[ + 0]['uartport'], + uartmode=serial_config[ + 0]['uartmode'], + uartpath=port) + port += 1 + else: + vboxmanage.vboxmanage_modifyvm( + node, uartbase=serial_config[0]['uartbase'], + uartport=serial_config[ + 0]['uartport'], + uartmode=serial_config[ + 0]['uartmode'], + uartpath=serial_config[ + 0]['uartpath'], + prefix=vboxoptions.userid) + + if "controller" in node: + node_type = "controller" + + last_adapter = 1 + for item in nic_config: + if item['node_type'] == node_type: + for adapter in item.keys(): + if adapter.isdigit(): + last_adapter += 1 + data = item[adapter] + if vboxoptions.vboxnet_name is not 'none' and data['nic'] is 'hostonly': + if vboxoptions.vboxnet_type == 'nat': + data['nic'] = 'natnetwork' + data['natnetwork'] = vboxoptions.vboxnet_name + data['hostonlyadapter'] = None + data['intnet'] = None + # data['nicpromisc1'] = None + else: + data[ + 'hostonlyadapter'] = vboxoptions.vboxnet_name + data['natnetwork'] = None + else: + data['natnetwork'] = None + vboxmanage.vboxmanage_modifyvm(node, + nic=data['nic'], nictype=data['nictype'], + nicpromisc=data['nicpromisc'], + nicnum=int(adapter), intnet=data['intnet'], + hostonlyadapter=data['hostonlyadapter'], + natnetwork=data['natnetwork'], + prefix="{}-{}".format(vboxoptions.userid, + vboxoptions.labname)) + + if vboxoptions.add_nat_interface: + last_adapter += 1 + vboxmanage.vboxmanage_modifyvm(node, nicnum=adapter, nictype='nat') + + # Add 
def get_hostnames(ignore=None, personalities=('controller', 'storage', 'worker'),
                  options=None):
    """
    Based on the number of nodes defined on the command line, construct
    the hostnames of each node.

    Args:
        ignore: optional collection of VM names to leave out of the result.
        personalities: node types to include ('controller'/'worker'/'storage').
        options: namespace providing controllers/workers/storages/labname;
            defaults to the module-level `vboxoptions` (added, backward
            compatible, so the mapping can be built for arbitrary configs).

    Returns:
        dict mapping VM name "<labname>-<type>-<id>" -> hostname "<type>-<id>".

    BUG FIX: the controller and worker entries formatted the *builtin*
    function `id` instead of the loop variable `node_id`, producing
    hostnames like "controller-<built-in function id>". Also replaced the
    mutable default list for `personalities` with a tuple.
    """
    opts = options if options is not None else vboxoptions
    hostnames = {}

    def _add(count, kind):
        # One entry per node of the given personality, skipping ignored VMs.
        for node_id in range(count or 0):
            node_name = "{}-{}-{}".format(opts.labname, kind, node_id)
            if ignore and node_name in ignore:
                continue
            hostnames[node_name] = '{}-{}'.format(kind, node_id)

    if 'controller' in personalities:
        _add(opts.controllers, 'controller')
    if 'worker' in personalities:
        _add(opts.workers, 'worker')
    if 'storage' in personalities:
        _add(opts.storages, 'storage')

    return hostnames
def wait_for_hosts(ssh_client, hostnames, status,
                   timeout=HostTimeout.HOST_INSTALL, interval=20):
    """
    Wait for the given hosts to reach the expected status in
    `system host-list`, polling every `interval` seconds.

    Args:
        ssh_client: connected paramiko client to the active controller.
        hostnames: list of hostnames to wait for; emptied as hosts arrive.
        status: status string to look for in the host-list row.
        timeout: overall deadline in seconds.
        interval: seconds between polls.

    Raises:
        Exception: if the deadline expires with hosts still pending.

    BUG FIXES: the `timeout` parameter was ignored (the deadline always
    compared against HostTimeout.HOST_INSTALL); hosts were removed from
    `hostnames` while iterating over it, which can skip entries; and the
    failure `raise Exception("... %s!", status)` passed a tuple instead of
    a formatted message.
    """
    start = time.time()
    while hostnames:
        LOG.info("Hosts not %s: %s", status, hostnames)
        if (time.time() - start) > timeout:
            LOG.info("VMs not booted in %s, aborting: %s", timeout, hostnames)
            raise Exception("VMs failed to go {}: {}".format(status, hostnames))
        # Get host list
        host_statuses, _, _ = run_ssh_cmd(
            ssh_client, 'source /etc/nova/openrc; system host-list', timeout=30)
        host_statuses = host_statuses[1:-1]
        for host_status in host_statuses:
            # iterate a snapshot so removal doesn't skip the next entry
            for host in list(hostnames):
                if host in host_status and status in host_status:
                    hostnames.remove(host)
        if hostnames:
            LOG.info("Waiting %s sec before re-checking host status.", interval)
            time.sleep(interval)
logout, then login back again. + Note that logging out also helps fixing some problems with passwords + not getting accepted in some cases (prompt just hangs after inserting + password). + """ + + global serial_console_mode + + if serial_console_mode == mode: + LOG.info("Serial console prompt already set to '%s' mode.", mode) + return + if serial_console_mode != CONSOLE_USER_MODE: + # Set mode to user first, even if we later go to root + serial.send_bytes(stream, "exit\n", expect_prompt=False) + if serial.expect_bytes(stream, "ogin:", fail_ok=True, timeout=4): + serial.send_bytes(stream, "exit\n", expect_prompt=False) + if serial.expect_bytes(stream, "ogin:", fail_ok=True, timeout=4): + LOG.info("Expected login prompt, connect to console" \ + "stop any running processes and log out.") + raise Exception("Failure getting login prompt on serial console!") + serial.send_bytes( + stream, + vboxoptions.username, + prompt="assword:", + timeout=30) + if serial.send_bytes(stream, vboxoptions.password, prompt="~$", fail_ok=True, timeout=30): + raise Exception("Login failure, invalid password?") + if mode == CONSOLE_USER_MODE: + serial.send_bytes(stream, "source /etc/nova/openrc\n", + timeout=30, prompt='keystone') + serial_console_mode = CONSOLE_USER_MODE + if mode == 'root' and serial_console_mode != 'root': + serial.send_bytes(stream, 'sudo su -', expect_prompt=False) + host_helper.check_password(stream, password=vboxoptions.password) + serial.send_bytes( + stream, + "cd /home/wrsroot", + prompt="/home/wrsroot# ", + timeout=30) + serial.send_bytes(stream, "source /etc/nova/openrc\n", + timeout=30, prompt='keystone') + serial_console_mode = CONSOLE_ROOT_MODE + serial.send_bytes(stream, "export TMOUT=0", timeout=10, prompt='keystone') + # also reset OAM networking? 
def serial_prompt_mode(mode):
    """Decorator factory: ensure the serial console is in `mode` before
    running the wrapped stage; retries the login once on failure."""
    def real_decorator(func):
        def func_wrapper(*args, **kwargs):
            try:
                set_serial_prompt_mode(kwargs['stream'], mode)
            except Exception:  # bug fix: was a bare except; still retry once
                LOG.info("Serial console login as '%s' failed. Retrying once.", mode)
                set_serial_prompt_mode(kwargs['stream'], mode)
            return func(*args, **kwargs)
        return func_wrapper
    return real_decorator


def _connect_to_serial(vm=None):
    # Open the serial port of the given VM (controller-0 by default) and
    # wrap it in a streamexpect session.
    if not vm:
        vm = vboxoptions.labname + "-controller-0"
    sock = serial.connect(vm, 10000, getpass.getuser())
    return sock, streamexpect.wrap(sock, echo=True, close_stream=False)


def connect_to_serial(func):
    """Decorator: open a serial stream, pass it as kwargs['stream'] to the
    wrapped stage and always disconnect afterwards."""
    def func_wrapper(*args, **kwargs):
        # Bug fix: if _connect_to_serial() raised, 'sock' was unbound and the
        # finally clause masked the real error with a NameError.
        sock = None
        try:
            sock, kwargs['stream'] = _connect_to_serial()
            return func(*args, **kwargs)
        finally:
            if sock is not None:
                serial.disconnect(sock)

    return func_wrapper


def _connect_to_ssh():
    """Open an ssh connection to the lab and return the paramiko client."""

    # Get ip and port for ssh on floating ip
    ip, port = get_ssh_ip_and_port()

    # Remove ssh key
    # For hostonly adapter we remove port 22 of controller ip
    # for nat interfaces we remove the specific port on 127.0.0.1 as
    # we have port forwarding enabled.
    if vboxoptions.vboxnet_type == 'nat':
        keygen_arg = "[127.0.0.1]:{}".format(port)
    else:
        keygen_arg = ip
    cmd = 'ssh-keygen -f "/home/{}/.ssh/known_hosts" -R {}'.format(
        getpass.getuser(), keygen_arg)
    LOG.info("CMD: %s", cmd)
    process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    for line in iter(process.stdout.readline, b''):
        LOG.info("%s", line.decode("utf-8").strip())
    process.wait()

    # Connect to ssh
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    ssh.connect(ip, port=port, username=vboxoptions.username,
                password=vboxoptions.password, look_for_keys=False, allow_agent=False)
    return ssh


def connect_to_ssh(func):
    """Decorator: open an ssh client, pass it as kwargs['ssh_client'] to the
    wrapped stage and always close it afterwards."""
    def func_wrapper(*args, **kwargs):
        # Bug fix: if _connect_to_ssh() raised, 'ssh' was unbound and the
        # finally clause raised NameError instead of the real error.
        ssh = None
        try:
            ssh = _connect_to_ssh()
            kwargs['ssh_client'] = ssh
            return func(*args, **kwargs)
        finally:
            if ssh is not None:
                ssh.close()
    return func_wrapper


def stage_test_success():
    # Internal-testing stage that always succeeds.
    LOG.info("Executing stage_test_success")


def stage_test_fail():
    # Internal-testing stage that always raises.
    # Bug fix: the log message said "stage_test_success" (copy/paste error).
    LOG.info("Executing stage_test_fail")
    raise Exception("exception as of stage_test_fail")


def stage_create_lab():
    # Delete any pre-existing lab VMs with the same name, then create fresh
    # ones per the command-line options.
    delete_lab(vboxoptions.labname, vboxoptions.force_delete_lab)
    create_lab(vboxoptions)
    # time.sleep(2)


def stage_install_controller0():
    # Boot controller-0 from the ISO and drive the install over serial.
    node_list = vboxmanage.get_all_vms(vboxoptions.labname, option="vms")
    LOG.info("Found nodes: %s", node_list)

    ctrlr0 = vboxoptions.labname + "-controller-0"
    assert ctrlr0 in node_list, "controller-0 not in vm list. Stopping installation."
    vboxmanage.vboxmanage_startvm(ctrlr0)

    sock = serial.connect(ctrlr0, 10000, getpass.getuser())
    cont0_stream = streamexpect.wrap(sock, echo=True, close_stream=False)

    install_controller_0(
        cont0_stream, vboxoptions.setup_type, vboxoptions.securityprofile,
        vboxoptions.lowlatency,
        install_mode=vboxoptions.install_mode, ctrlr0_ip=vboxoptions.controller0_ip,
        gateway_ip=vboxoptions.vboxnet_ip,
        username=vboxoptions.username, password=vboxoptions.password)
    serial.disconnect(sock)
    time.sleep(5)


@connect_to_serial
def stage_config_controller(stream):
    # Update the OAM networking sections of the config_controller ini from
    # the command-line options, upload it to controller-0 over sftp and run
    # config_controller through the serial console.
    ip, port = get_ssh_ip_and_port(
        'controller-0')  # Floating ip is not yet configured
    if True:  # NOTE(review): the 'else' branch below is unreachable dead code
        # Updated config file
        LOG.info("#### Updating config_controller ini file networking" \
                 "settings and uploading it to controller.")
        destination = "/home/" + \
            vboxoptions.username + "/stx_config.ini_centos"
        configini = configparser.ConfigParser()
        # Keep option-name case; ConfigParser lowercases keys by default.
        configini.optionxform = str
        configini.read(vboxoptions.config_controller_ini)
        old_cidr = configini['OAM_NETWORK']['CIDR']
        new_cidr = vboxoptions.ini_oam_cidr
        LOG.info("Replacing OAM_NETWORK/CIDR from %s to %s", old_cidr, new_cidr)
        configini['OAM_NETWORK']['CIDR'] = new_cidr
        old_gateway = configini['OAM_NETWORK']['GATEWAY']
        new_gateway = vboxoptions.vboxnet_ip
        LOG.info("Replacing OAM_NETWORK/GATEWAY from %s to %s", old_gateway, new_gateway)
        configini['OAM_NETWORK']['GATEWAY'] = new_gateway
        if vboxoptions.setup_type == AIO_SX:
            # Simplex uses a single fixed OAM address.
            old_ip_address = configini['OAM_NETWORK']['IP_ADDRESS']
            new_ip_address = vboxoptions.controller0_ip
            LOG.info("Replacing OAM_NETWORK/IP_ADDRESS from %s to %s",
                     old_ip_address, new_ip_address)
            configini['OAM_NETWORK']['IP_ADDRESS'] = new_ip_address
        else:
            # Multi-node setups use an OAM address range instead.
            old_start_addr = configini['OAM_NETWORK']['IP_START_ADDRESS']
            new_start_addr = vboxoptions.ini_oam_ip_start_address
            LOG.info("Replacing OAM_NETWORK/IP_START_ADDRESS from %s to %s",
                     old_start_addr, new_start_addr)
            configini['OAM_NETWORK']['IP_START_ADDRESS'] = new_start_addr
            old_end_addr = configini['OAM_NETWORK']['IP_END_ADDRESS']
            new_end_addr = vboxoptions.ini_oam_ip_end_address
            LOG.info("Replacing OAM_NETWORK/IP_END_ADDRESS from %s to %s",
                     old_end_addr, new_end_addr)
            configini['OAM_NETWORK']['IP_END_ADDRESS'] = new_end_addr

        # Take updated config file and copy it to controller
        with tempfile.NamedTemporaryFile(mode='w') as fp:
            configini.write(fp, space_around_delimiters=False)
            fp.flush()

            sftp_send(
                fp.name, remote_host=ip, remote_port=port, destination=destination,
                username=vboxoptions.username, password=vboxoptions.password)
    else:
        destination = "/home/" + \
            vboxoptions.username + "/stx_config.ini_centos"
        sftp_send(
            vboxoptions.config_controller_ini, remote_host=ip, remote_port=port,
            destination=destination,
            username=vboxoptions.username, password=vboxoptions.password)

    # Run config_controller
    LOG.info("#### Running config_controller")
    install_lab.config_controller(stream, config_file=destination,
                                  password=vboxoptions.password)

    # Wait for services to stabilize
    time.sleep(120)

    if vboxoptions.setup_type == AIO_SX:
        # Increase AIO responsiveness by allocating more cores to platform
        install_lab.update_platform_cpus(stream, 'controller-0')


def get_ssh_ip_and_port(node='floating'):
    # Map a logical node name ('floating', 'controller-0', 'controller-1')
    # to the (ip, port) to ssh to, depending on whether the lab is reachable
    # through NAT port forwarding (local ports on 127.0.0.1) or directly on
    # a hostonly network (port 22 on the node's own address).
    if vboxoptions.vboxnet_type == 'nat':
        ip = '127.0.0.1'
        if node == 'floating':
            # AIO-SX has no floating address; fall back to controller-0.
            if vboxoptions.setup_type != 'AIO-SX':
                port = vboxoptions.nat_controller_floating_local_ssh_port
            else:
                port = vboxoptions.nat_controller0_local_ssh_port
        elif node == 'controller-0':
            port = vboxoptions.nat_controller0_local_ssh_port
        elif node == 'controller-1':
            port = vboxoptions.nat_controller_1_local_ssh_port
        else:
            raise Exception("Undefined node '{}'".format(node))
    else:
        if node == 'floating':
            if vboxoptions.setup_type != 'AIO-SX':
                ip = vboxoptions.controller_floating_ip
            else:
                ip = vboxoptions.controller0_ip
        elif 
node == 'controller-0': + ip = vboxoptions.controller0_ip + elif node == 'controller-1': + ip = vboxoptions.controller1_ip + else: + raise Exception("Undefined node '{}'".format(node)) + port = 22 + return ip, port + +#@connect_to_serial +#@serial_prompt_mode(CONSOLE_USER_MODE) + + +def stage_rsync_config(): + # Get ip and port for ssh on floating ip + ip, port = get_ssh_ip_and_port() + # Copy config files to controller + if vboxoptions.config_files_dir: + local_path = vboxoptions.config_files_dir + send_dir(source=local_path, remote_host=ip, remote_port=port, + destination='/home/' + vboxoptions.username + '/', + username=vboxoptions.username, password=vboxoptions.password) + if vboxoptions.config_files_dir_dont_follow_links: + local_path = vboxoptions.config_files_dir_dont_follow_links + send_dir(source=local_path, remote_host=ip, remote_port=port, + destination='/home/' + vboxoptions.username + '/', + username=vboxoptions.username, password=vboxoptions.password) + if not vboxoptions.config_files_dir and not vboxoptions.config_files_dir_dont_follow_links: + LOG.info("No rsync done! Please set config-files-dir" \ + "and/or config-files-dir-dont-follow-links") + + +@connect_to_serial +@serial_prompt_mode(CONSOLE_USER_MODE) +def _run_lab_setup_serial(stream): + conf_str = "" + for cfg_file in vboxoptions.lab_setup_conf: + conf_str = conf_str + " -f {}".format(cfg_file) + + serial.send_bytes(stream, "sh lab_setup.sh {}".format(conf_str), + timeout=HostTimeout.LAB_INSTALL, prompt='keystone') + LOG.info("Lab setup execution completed. 
Checking if return code is 0.") + serial.send_bytes(stream, "echo \"Return code: [$?]\"", + timeout=3, prompt='Return code: [0]') + + +@connect_to_ssh +def _run_lab_setup(ssh_client): + conf_str = "" + for cfg_file in vboxoptions.lab_setup_conf: + conf_str = conf_str + " -f {}".format(cfg_file) + + _, _, exitcode = run_ssh_cmd(ssh_client, + 'source /etc/platform/openrc; ' + 'export PATH="$PATH:/usr/local/bin; ' + 'export PATH="$PATH:/usr/bin; ' + 'export PATH="$PATH:/usr/local/sbin; ' + 'export PATH="$PATH:/usr/sbin"; ' + 'sh lab_setup.sh {}'.format(conf_str), + timeout=HostTimeout.LAB_INSTALL) + if exitcode != 0: + msg = "Lab setup failed, expecting exit code of 0 but got {}.".format( + exitcode) + LOG.info(msg) + raise Exception(msg) + + +def stage_lab_setup1(): + _run_lab_setup() + + +def stage_lab_setup2(): + _run_lab_setup() + + +def stage_lab_setup3(): + _run_lab_setup() + + +def stage_lab_setup4(): + _run_lab_setup() + + +def stage_lab_setup5(): + _run_lab_setup() + + +@connect_to_ssh +@connect_to_serial +def stage_unlock_controller0(stream, ssh_client): + LOG.info("#### Unlocking controller-0") + _, _, _ = run_ssh_cmd(ssh_client, + 'source /etc/nova/openrc; system host-unlock controller-0', + timeout=HostTimeout.CONTROLLER_UNLOCK) + + LOG.info("#### Waiting for controller-0 to reboot") + serial.expect_bytes( + stream, + 'login:', + timeout=HostTimeout.CONTROLLER_UNLOCK) + + LOG.info("Waiting 120s for services to activate.") + time.sleep(120) + + # Make sure we login again, after reboot we are not logged in. + serial_console_mode = CONSOLE_UNKNOWN_MODE + + +@connect_to_serial +@serial_prompt_mode(CONSOLE_USER_MODE) +def stage_unlock_controller0_serial(stream): + global serial_console_mode + if host_helper.unlock_host(stream, 'controller-0'): + LOG.info("Host is unlocked, nothing to do. 
Exiting stage.") + return + + serial.expect_bytes( + stream, + 'login:', + timeout=HostTimeout.CONTROLLER_UNLOCK) + + LOG.info("Waiting 120s for services to activate.") + time.sleep(120) + + # Make sure we login again + serial_console_mode = CONSOLE_UNKNOWN_MODE # After reboot we are not logged in. + + +@connect_to_ssh +def stage_install_nodes(ssh_client): + # Create and transfer host_bulk_add.xml to ctrl-0 + host_xml = create_host_bulk_add() + + LOG.info("host_bulk_add.xml content:\n%s", host_xml) + + # Send file to controller + destination = "/home/" + vboxoptions.username + "/host_bulk_add.xml" + with tempfile.NamedTemporaryFile() as fp: + fp.write(host_xml.encode('utf-8')) + fp.flush() + # Connection to NAT interfaces is local + if vboxoptions.vboxnet_type == 'nat': + ip = '127.0.0.1' + port = vboxoptions.nat_controller0_local_ssh_port + else: + ip = vboxoptions.controller0_ip + port = 22 + sftp_send(source=fp.name, remote_host=ip, remote_port=port, + destination=destination, + username=vboxoptions.username, password=vboxoptions.password) + # Apply host-bulk-add + _, _, exitcode = run_ssh_cmd(ssh_client, + 'source /etc/nova/openrc; ', + 'system host-bulk-add {}'.format(destination), + timeout=60) + if exitcode != 0: + msg = "Host bulk add failed, expecting exit code of 0 but got %s", exitcode + LOG.info(msg) + raise Exception(msg) + + # Start hosts one by one, wait 10s between each start + vms = vboxmanage.get_all_vms(vboxoptions.labname, option="vms") + runningvms = vboxmanage.get_all_vms( + vboxoptions.labname, + option="runningvms") + powered_off = list(set(vms) - set(runningvms)) + LOG.info("#### Powered off VMs: %s", powered_off) + for vm in powered_off: + LOG.info("#### Powering on VM: %s", vm) + vboxmanage.vboxmanage_startvm(vm, force=True) + LOG.info("Give VM 20s to boot.") + time.sleep(20) + + ctrl0 = vboxoptions.labname + "-controller-0" + hostnames = list(get_hostnames(ignore=[ctrl0]).values()) + + wait_for_hosts(ssh_client, hostnames, 'online') + + 
+@connect_to_ssh +def stage_unlock_controller1(ssh_client): + # Fast for standard, wait for storage + hostnames = list(get_hostnames().values()) + if 'controller-1' not in hostnames: + LOG.info("Controller-1 not configured, skipping unlock.") + return + + LOG.info("#### Unlocking controller-1") + run_ssh_cmd(ssh_client, + 'source /etc/nova/openrc; system host-unlock controller-1', + timeout=60) + + LOG.info("#### waiting for controller-1 to be available.") + wait_for_hosts(ssh_client, ['controller-1'], 'available') + + +@connect_to_ssh +def stage_unlock_storages(ssh_client): + # Unlock storage nodes, wait for them to be 'available' + storages = list(get_hostnames(personalities=['storage']).values()) + + for storage in storages: + run_ssh_cmd(ssh_client, + 'source /etc/nova/openrc; system host-unlock {}'.format(storage), + timeout=60) + LOG.info("Waiting 15s before next unlock") + time.sleep(15) + + LOG.info("#### Waiting for all hosts to be available.") + wait_for_hosts(ssh_client, storages, 'available') + + +@connect_to_ssh +def stage_unlock_workers(ssh_client): + # Unlock all, wait for all hosts, except ctrl0 to be 'available' + workers = list(get_hostnames(personalities=['worker']).values()) + ctrl0 = vboxoptions.labname + '-controller-0' + + for worker in workers: + run_ssh_cmd( + ssh_client, + 'source /etc/nova/openrc; system host-unlock {}'.format(worker), + timeout=60) + LOG.info("Waiting 15s before next unlock") + time.sleep(15) + + # Wait for all hosts, except ctrl0 to be available + # At this stage we expect ctrl1 to also be available + hosts = list(get_hostnames(ignore=[ctrl0]).values()) + wait_for_hosts(ssh_client, hosts, 'available') + + +def run_custom_script(script, timeout, console, mode): + LOG.info("#### Running custom script %s with options:", script) + LOG.info(" timeout: %s", timeout) + LOG.info(" console mode: %s", console) + LOG.info(" user mode: %s", mode) + if console == 'ssh': + ssh_client = _connect_to_ssh() + _, __, return_code = 
run_ssh_cmd(ssh_client, "./{}".format(script), + timeout=timeout, mode=mode) + if return_code != 0: + LOG.info("Custom script '%s' return code is not 0. Aborting.", script) + raise Exception("Script execution failed with return code: {}".format(return_code)) + else: + sock, stream = _connect_to_serial() + try: + if mode == 'root': + set_serial_prompt_mode(stream, CONSOLE_ROOT_MODE) + # Login as root + serial.send_bytes(stream, 'sudo su -', expect_prompt=False) + host_helper.check_password( + stream, + password=vboxoptions.password) + else: + set_serial_prompt_mode(stream, CONSOLE_USER_MODE) + serial.send_bytes(stream, "./{}".format(script), + timeout=timeout, prompt='keystone') + LOG.info("Script execution completed. Checking if return code is 0.") + serial.send_bytes(stream, + "echo \"Return code: [$?]\"".format(script), + timeout=3, prompt='Return code: [0]') + finally: + sock.close() + + +def get_custom_script_options(options_list): + LOG.info("Parsing custom script options: %s", options_list) + # defaults + script = "" + timeout = 5 + console = 'serial' + mode = 'user' + # supported options + CONSOLES = ['serial', 'ssh'] + MODES = ['user', 'root'] + + # No spaces or special chars allowed + not_allowed = ['\n', ' ', '*'] + for c in not_allowed: + if c in options_list: + LOG.info("Char '%s' not allowed in options list: %s.", c, options_list) + raise Exception("Char not allowed in options_list") + + # get options + options = options_list.split(',') + if len(options) >= 1: + script = options[0] + if len(options) >= 2: + timeout = int(options[1]) + if len(options) >= 3: + console = options[2] + if console not in CONSOLES: + raise "Console must be one of {}, not {}.".format( + CONSOLES, console) + if len(options) >= 4: + mode = options[3] + if mode not in MODES: + raise "Mode must be one of {}, not {}.".format(MODES, mode) + return script, timeout, console, mode + + +def stage_custom_script1(): + if vboxoptions.script1: + script, timeout, console, mode = 
get_custom_script_options( + vboxoptions.script1) + else: + script = "custom_script1.sh" + timeout = 3600 + console = 'serial' + mode = 'user' + run_custom_script(script, timeout, console, mode) + + +def stage_custom_script2(): + if vboxoptions.script2: + script, timeout, console, mode = get_custom_script_options( + vboxoptions.script2) + else: + script = "custom_script2.sh" + timeout = 3600 + console = 'serial' + mode = 'user' + run_custom_script(script, timeout, console, mode) + + +def stage_custom_script3(): + if vboxoptions.script3: + script, timeout, console, mode = get_custom_script_options( + vboxoptions.script3) + else: + script = "custom_script3.sh" + timeout = 3600 + console = 'serial' + mode = 'user' + run_custom_script(script, timeout, console, mode) + + +def stage_custom_script4(): + if vboxoptions.script4: + script, timeout, console, mode = get_custom_script_options( + vboxoptions.script4) + else: + script = "custom_script4.sh" + timeout = 3600 + console = 'serial' + mode = 'user' + run_custom_script(script, timeout, console, mode) + + +def stage_custom_script5(): + if vboxoptions.script5: + script, timeout, console, mode = get_custom_script_options( + vboxoptions.script5) + else: + script = "custom_script5.sh" + timeout = 3600 + console = 'serial' + mode = 'user' + run_custom_script(script, timeout, console, mode) + +STG_CREATE_LAB = "create-lab" +STG_INSTALL_CONTROLLER0 = "install-controller-0" +STG_CONFIG_CONTROLLER = "config-controller" +STG_RSYNC_CONFIG = "rsync-config" +STG_LAB_SETUP1 = "lab-setup1" +STG_UNLOCK_CONTROLLER0 = "unlock-controller-0" +STG_LAB_SETUP2 = "lab-setup2" +STG_INSTALL_NODES = "install-nodes" +STG_UNLOCK_CONTROLLER1 = "unlock-controller-1" +STG_LAB_SETUP3 = "lab-setup3" +STG_UNLOCK_STORAGES = "unlock-storages" +STG_LAB_SETUP4 = "lab-setup4" +STG_UNLOCK_WORKERS = "unlock-workers" +STG_LAB_SETUP5 = "lab-setup5" +STG_CUSTOM_SCRIPT1 = "custom-script1" +STG_CUSTOM_SCRIPT2 = "custom-script2" +STG_CUSTOM_SCRIPT3 = "custom-script3" 
STG_CUSTOM_SCRIPT4 = "custom-script4"
STG_CUSTOM_SCRIPT5 = "custom-script5"

# For internal testing only, one stage is always successful
# the other one always raises an exception.
# NOTE(review): 'STC_' looks like a typo for 'STG_'; kept for compatibility.
STC_TEST_SUCCESS = "test-success"
STG_TEST_FAIL = "test-fail"

# Keys of the per-stage descriptor dicts in STAGE_CALLBACKS.
CALLBACK = 'callback'
HELP = 'help'

# Maps each stage name to its implementation and its --list-stages help text.
STAGE_CALLBACKS = {
    STG_CREATE_LAB:
        {CALLBACK: stage_create_lab,
         HELP: "Create VMs in vbox: controller-0, controller-1..."},
    STG_INSTALL_CONTROLLER0:
        {CALLBACK: stage_install_controller0,
         HELP: "Install controller-0 from --iso-location"},
    STG_CONFIG_CONTROLLER:
        {CALLBACK: stage_config_controller,
         HELP: "Run config controller using the --config-controller-ini" \
               "updated based on --ini-* options."},
    STG_RSYNC_CONFIG:
        {CALLBACK: stage_rsync_config,
         HELP: "Rsync all files from --config-files-dir and --config-files-dir* to /home/wrsroot."},
    STG_LAB_SETUP1:
        {CALLBACK: stage_lab_setup1,
         HELP: "Run lab_setup with one or more --lab-setup-conf files from controller-0."},
    STG_UNLOCK_CONTROLLER0:
        {CALLBACK: stage_unlock_controller0,
         HELP: "Unlock controller-0 and wait for it to reboot."},
    STG_LAB_SETUP2:
        {CALLBACK: stage_lab_setup2,
         HELP: "Run lab_setup with one or more --lab-setup-conf files from controller-0."},
    STG_INSTALL_NODES:
        {CALLBACK: stage_install_nodes,
         HELP: "Generate a host-bulk-add.xml, apply it and install all" \
               "other nodes, wait for them to be 'online."},
    STG_UNLOCK_CONTROLLER1:
        {CALLBACK: stage_unlock_controller1,
         HELP: "Unlock controller-1, wait for it to be 'available'"},
    STG_LAB_SETUP3:
        {CALLBACK: stage_lab_setup3,
         HELP: "Run lab_setup with one or more --lab-setup-conf files from controller-0."},
    STG_UNLOCK_STORAGES:
        {CALLBACK: stage_unlock_storages,
         HELP: "Unlock all storage nodes, wait for them to be 'available'"},
    STG_LAB_SETUP4:
        {CALLBACK: stage_lab_setup4,
         HELP: "Run lab_setup with one or more --lab-setup-conf files from controller-0."},
    STG_UNLOCK_WORKERS:
        {CALLBACK: stage_unlock_workers,
         HELP: "Unlock all workers, wait for them to be 'available"},
    STG_LAB_SETUP5:
        {CALLBACK: stage_lab_setup5,
         HELP: "Run lab_setup with one or more --lab-setup-conf files from controller-0."},
    STG_CUSTOM_SCRIPT1:
        {CALLBACK: stage_custom_script1,
         HELP: "Run a custom script from /home/wrsroot, make sure you" \
               "upload it in the rsync-config stage and it is +x. See help."},
    STG_CUSTOM_SCRIPT2:
        {CALLBACK: stage_custom_script2,
         HELP: "Run a custom script from /home/wrsroot, make sure you" \
               "upload it in the rsync-config stage and it is +x. See help."},
    STG_CUSTOM_SCRIPT3:
        {CALLBACK: stage_custom_script3,
         HELP: "Run a custom script from /home/wrsroot, make sure you" \
               "upload it in the rsync-config stage and it is +x. See help."},
    STG_CUSTOM_SCRIPT4:
        {CALLBACK: stage_custom_script4,
         HELP: "Run a custom script from /home/wrsroot, make sure you" \
               "upload it in the rsync-config stage and it is +x. See help."},
    STG_CUSTOM_SCRIPT5:
        {CALLBACK: stage_custom_script5,
         HELP: "Run a custom script from /home/wrsroot, make sure you" \
               "upload it in the rsync-config stage and it is +x. See help."},
    # internal testing
    STC_TEST_SUCCESS: {CALLBACK: stage_test_success,
                       HELP: "Internal only, does not do anything, used for testing."},
    STG_TEST_FAIL: {CALLBACK: stage_test_fail,
                    HELP: "Internal only, raises exception, used for testing."},
}

# All stages, in canonical execution order; valid values for --custom-stages.
AVAILABLE_STAGES = [STG_CREATE_LAB,
                    STG_INSTALL_CONTROLLER0,
                    STG_CONFIG_CONTROLLER,
                    STG_RSYNC_CONFIG,
                    STG_LAB_SETUP1,
                    STG_UNLOCK_CONTROLLER0,
                    STG_LAB_SETUP2,
                    STG_INSTALL_NODES,
                    STG_UNLOCK_CONTROLLER1,
                    STG_LAB_SETUP3,
                    STG_UNLOCK_STORAGES,
                    STG_LAB_SETUP4,
                    STG_UNLOCK_WORKERS,
                    STG_LAB_SETUP5,
                    STG_CUSTOM_SCRIPT1,
                    STG_CUSTOM_SCRIPT2,
                    STG_CUSTOM_SCRIPT3,
                    STG_CUSTOM_SCRIPT4,
                    STG_CUSTOM_SCRIPT5,
                    STC_TEST_SUCCESS,
                    STG_TEST_FAIL]

# Default stage chains for each setup type.
AIO_SX_STAGES = [
    STG_CREATE_LAB,
    STG_INSTALL_CONTROLLER0,
    STG_CONFIG_CONTROLLER,
    STG_RSYNC_CONFIG,
    STG_LAB_SETUP1,
    STG_UNLOCK_CONTROLLER0,
    STG_LAB_SETUP2,
]

AIO_DX_STAGES = [
    STG_CREATE_LAB,
    STG_INSTALL_CONTROLLER0,
    STG_CONFIG_CONTROLLER,
    STG_RSYNC_CONFIG,
    STG_LAB_SETUP1,
    STG_UNLOCK_CONTROLLER0,
    STG_INSTALL_NODES,
    STG_LAB_SETUP2,
    STG_UNLOCK_CONTROLLER1,
    STG_LAB_SETUP3,
]

STD_STAGES = [
    STG_CREATE_LAB,
    STG_INSTALL_CONTROLLER0,
    STG_CONFIG_CONTROLLER,
    STG_RSYNC_CONFIG,
    STG_LAB_SETUP1,
    STG_UNLOCK_CONTROLLER0,
    STG_INSTALL_NODES,
    STG_LAB_SETUP2,
    STG_UNLOCK_CONTROLLER1,
    STG_LAB_SETUP3,
    STG_UNLOCK_WORKERS
]

STORAGE_STAGES = [
    STG_CREATE_LAB,
    STG_INSTALL_CONTROLLER0,
    STG_CONFIG_CONTROLLER,
    STG_RSYNC_CONFIG,
    STG_LAB_SETUP1,
    STG_UNLOCK_CONTROLLER0,
    STG_INSTALL_NODES,
    STG_LAB_SETUP2,
    STG_UNLOCK_CONTROLLER1,
    STG_LAB_SETUP3,
    STG_UNLOCK_STORAGES,
    STG_LAB_SETUP4,
    STG_UNLOCK_WORKERS,
    STG_LAB_SETUP5
]

# Setup type names, matching the --setup-type argparse choices.
AIO_SX = 'AIO-SX'
AIO_DX = 'AIO-DX'
STANDARD = 'STANDARD'
STORAGE = 'STORAGE'

STAGES_CHAINS = {AIO_SX: AIO_SX_STAGES,
                 AIO_DX: AIO_DX_STAGES,
                 STANDARD: STD_STAGES,
                 STORAGE: STORAGE_STAGES}
AVAILABLE_CHAINS = [AIO_SX, AIO_DX, STANDARD, STORAGE]


def 
load_config(): + global vboxoptions + vboxoptions = handle_args().parse_args() + + lab_config = [getattr(env.Lab, attr) + for attr in dir(env.Lab) if not attr.startswith('__')] + oam_config = [getattr(OAM, attr) + for attr in dir(OAM) if not attr.startswith('__')] + + if vboxoptions.controller0_ip is None: + vboxoptions.controller0_ip = lab_config[0]['controller-0_ip'] + + if vboxoptions.vboxnet_ip is None: + vboxoptions.vboxnet_ip = oam_config[0]['ip'] + + if vboxoptions.username is None: + vboxoptions.username = lab_config[0]['username'] + + if vboxoptions.password is None: + vboxoptions.password = lab_config[0]['password'] + if vboxoptions.hostiocache: + vboxoptions.hostiocache = 'on' + else: + vboxoptions.hostiocache = 'off' + if vboxoptions.lab_setup_conf is None: + vboxoptions.lab_setup_conf = {"~/lab_setup.conf"} + else: + vboxoptions.lab_setup_conf = vboxoptions.lab_setup_conf + + if vboxoptions.setup_type == AIO_SX: + vboxoptions.controllers = 1 + vboxoptions.workers = 0 + vboxoptions.storages = 0 + elif vboxoptions.setup_type == AIO_DX: + vboxoptions.controllers = 2 + vboxoptions.workers = 0 + vboxoptions.storages = 0 + elif vboxoptions.setup_type == STANDARD: + vboxoptions.storages = 0 + + +def pre_validate(vboxoptions): + err = False + if not vboxoptions.setup_type: + print("Please set --setup-type") + err = True + if not vboxoptions.labname: + print("Please set --labname") + err = True + if not vboxoptions.config_controller_ini: + print("Please set --iso-location") + err = True + if err: + print("\nMissing arguments. 
Please check --help and --list-stages for usage.") + exit(5) + + +def validate(vboxoptions, stages): + err = False + # Generic + if vboxoptions.vboxnet_type == 'nat': + if vboxoptions.setup_type != AIO_SX: + if not vboxoptions.nat_controller_floating_local_ssh_port: + print("Please set --nat-controller-floating-local-ssh-port") + err = True + if not vboxoptions.nat_controller0_local_ssh_port: + print("Please set --nat-controller0-local-ssh-port") + err = True + if vboxoptions.controllers > 1 and not vboxoptions.nat_controller1_local_ssh_port: + print("Second controller is configured, please set --nat-controller1-local-ssh-port") + err = True + else: + if vboxoptions.setup_type != AIO_SX: + if not vboxoptions.controller_floating_ip: + print("Please set --controller-floating-ip") + err = True + if not vboxoptions.controller0_ip: + print("Please set --controller0-ip") + err = True + if vboxoptions.controllers > 1 and not vboxoptions.controller1_ip: + print("Second controller is configured, please set --controller1-ip") + err = True + if STG_CONFIG_CONTROLLER in stages: + if not vboxoptions.config_controller_ini: + print("Please set --config-controller-ini " + "as needed by stage {}".format(STG_CONFIG_CONTROLLER)) + err = True + if STG_RSYNC_CONFIG in stages: + if not vboxoptions.config_files_dir and not vboxoptions.config_files_dir_dont_follow_links: + print("Please set --config-files-dir and/or --config-files-dir-dont-follow-links " + "as needed by stage {} and {}".format(STG_RSYNC_CONFIG, + STG_LAB_SETUP1)) + err = True + if (STG_LAB_SETUP1 in stages or STG_LAB_SETUP2 in stages + or STG_LAB_SETUP3 in stages or STG_LAB_SETUP4 in stages + or STG_LAB_SETUP5 in stages): + if not vboxoptions.lab_setup_conf: + print("Please set at least one --lab-setup-conf file as needed by lab-setup stages") + err = True + FILE = ["lab_setup.sh"] + dirs = [] + if vboxoptions.config_files_dir: + dirs.append(vboxoptions.config_files_dir) + if 
vboxoptions.config_files_dir_dont_follow_links: + dirs.append(vboxoptions.config_files_dir_dont_follow_links) + for directory in dirs: + pass + if err: + print("\nMissing arguments. Please check --help and --list-stages for usage.") + exit(5) + + +def wrap_stage_help(stage, stage_callbacks, number=None): + if number: + text = " {}. {}".format(number, stage) + else: + text = " {}".format(stage) + LEN = 30 + fill = LEN - len(text) + text += " " * fill + text += "# {}".format(stage_callbacks) + return text + +# Define signal handler for ctrl+c + + +def signal_handler(sig, frame): + print('You pressed Ctrl+C!') + kpi.print_kpi_metrics() + sys.exit(1) + +if __name__ == "__main__": + kpi.init_kpi_metrics() + signal.signal(signal.SIGINT, signal_handler) + + load_config() + + if vboxoptions.list_stages: + print("Defined setups: {}".format(list(STAGES_CHAINS.keys()))) + if vboxoptions.setup_type and vboxoptions.setup_type in AVAILABLE_CHAINS: + AVAILABLE_CHAINS = [vboxoptions.setup_type] + for setup in AVAILABLE_CHAINS: + i = 1 + print("Stages for setup: {}".format(setup)) + for stage in STAGES_CHAINS[setup]: + print(wrap_stage_help(stage, STAGE_CALLBACKS[stage][HELP], i)) + i += 1 + print("Available stages that can be used for --custom-stages:") + for stage in AVAILABLE_STAGES: + print(wrap_stage_help(stage, STAGE_CALLBACKS[stage][HELP])) + exit(0) + + pre_validate(vboxoptions) + + init_logging(vboxoptions.labname, vboxoptions.logpath) + LOG.info("Logging to directory: %s", (get_log_dir() + "/")) + + LOG.info("Install manages: %s controllers, %s workers, %s storages.", + vboxoptions.controllers, vboxoptions.workers, vboxoptions.storages) + + # Setup stages to run based on config + install_stages = [] + if vboxoptions.custom_stages: + # Custom stages + install_stages = vboxoptions.custom_stages.split(',') + for stage in install_stages: + invalid_stages = [] + if stage not in AVAILABLE_STAGES: + invalid_stages.append(stage) + if invalid_stages: + LOG.info("Following custom 
stages are not supported: %s.\n" \ + "Choose from: %s", invalid_stages, AVAILABLE_STAGES) + exit(1) + else: + # List all stages between 'from-stage' to 'to-stage' + stages = STAGES_CHAINS[vboxoptions.setup_type] + from_index = 0 + to_index = None + if vboxoptions.from_stage: + if vboxoptions.from_stage == 'start': + from_index = 0 + else: + from_index = stages.index(vboxoptions.from_stage) + if vboxoptions.to_stage: + if vboxoptions.from_stage == 'end': + to_index = -1 + else: + to_index = stages.index(vboxoptions.to_stage) + 1 + if to_index is not None: + install_stages = stages[from_index:to_index] + else: + install_stages = stages[from_index:] + LOG.info("Executing %s stage(s): %s.", len(install_stages), install_stages) + + validate(vboxoptions, install_stages) + + stg_no = 0 + prev_stage = None + for stage in install_stages: + stg_no += 1 + start = time.time() + try: + LOG.info("######## (%s/%s) Entering stage %s ########", + stg_no, + len(install_stages), + stage) + STAGE_CALLBACKS[stage][CALLBACK]() + + # Take snapshot if configured + if vboxoptions.snapshot: + vboxmanage.take_snapshot( + vboxoptions.labname, + "snapshot-AFTER-{}".format(stage)) + + # Compute KPIs + duration = time.time() - start + kpi.set_kpi_metric(stage, duration) + kpi.print_kpi(stage) + kpi.print_kpi('total') + except Exception as e: + duration = time.time() - start + kpi.set_kpi_metric(stage, duration) + LOG.info("INSTALL FAILED, ABORTING!") + kpi.print_kpi_metrics() + LOG.info("Exception details: %s", e) + raise + # Stage completed + prev_stage = stage + + LOG.info("INSTALL SUCCEEDED!") + kpi.print_kpi_metrics() diff --git a/deployment/virtualbox/pybox/requirements.txt b/deployment/virtualbox/pybox/requirements.txt new file mode 100644 index 00000000..dd488e2d --- /dev/null +++ b/deployment/virtualbox/pybox/requirements.txt @@ -0,0 +1,6 @@ +# Install rsync, sshpass +# +configparser +paramiko +pytest +streamexpect diff --git a/deployment/virtualbox/pybox/utils/__init__.py 
#
# utils/install_log.py
#
# SPDX-License-Identifier: Apache-2.0
#
# Logging helpers: create a per-run log directory, configure the shared
# module logger, and maintain a "latest" symlink to the newest run.

import datetime
import logging
import os
import time

# Directory holding the logs of the current run; set by init_logging().
log_dir = ""
# Shared root logger used by the whole installer.
LOG = logging.getLogger()


def init_logging(lab_name, log_path=None):
    """Initialize file and console logging for a lab install run.

    Creates <log_path>/<lab_name>/<timestamp>/install.log, attaches a
    file handler and a stream handler to the root logger, and refreshes
    the per-lab "latest" symlink.

    :param lab_name: name of the lab; used as a log sub-directory.
    :param log_path: base log directory; defaults to consts.env.LOGPATH.
    """
    global log_dir

    if not log_path:
        # Imported lazily so this module can be used (and tested)
        # without the project-specific consts package when an explicit
        # log_path is supplied.
        from consts.env import LOGPATH
        log_path = LOGPATH
    lab_log_path = log_path + "/" + lab_name

    # Setup log sub-directory for the current run.
    now = datetime.datetime.now()
    log_dir = "{}/{}_{}_{}_{}_{}_{}".format(lab_log_path,
                                            now.year, now.month, now.day,
                                            now.hour, now.minute, now.second)
    os.makedirs(log_dir, exist_ok=True)

    LOG.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s: %(message)s")

    file_handler = logging.FileHandler("{}/install.log".format(log_dir))
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)
    LOG.addHandler(file_handler)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    LOG.addHandler(console_handler)

    # Refresh the symbolic link pointing at the latest logs of this lab.
    # Only OSError (link missing on first run) is expected here; the
    # original bare "except:" also swallowed e.g. KeyboardInterrupt.
    latest = lab_log_path + "/latest"
    try:
        os.unlink(latest)
    except OSError:
        pass
    os.symlink(log_dir, latest)


def get_log_dir():
    """Return the log directory of the current run ("" before init)."""
    return log_dir


#
# utils/kpi.py
#
# SPDX-License-Identifier: Apache-2.0
#
# Simple KPI bookkeeping: record per-stage durations and report them.
# Uses the LOG object configured above.

STAGES = []    # stage names in insertion order
METRICS = {}   # stage name -> duration in seconds
start = 0      # wall-clock time when KPI collection started


def init_kpi_metrics():
    """Record the start time used for the 'total' KPI."""
    global start
    start = time.time()


def get_formated_time(sec):
    """Return a duration in seconds as a human-readable string.

    e.g. 3661.5 -> '1h 1m 1.50s'.  Always returns a string; the
    original returned None for a zero-second duration.
    """
    hours, sec = divmod(sec, 3600)
    minutes, seconds = divmod(sec, 60)
    if hours:
        return "{:.0f}h {:.0f}m {:.2f}s".format(hours, minutes, seconds)
    if minutes:
        return "{:.0f}m {:.2f}s".format(minutes, seconds)
    return "{:.2f}s".format(seconds)


def set_kpi_metric(metric, duration):
    """Record the duration (seconds) of a named stage."""
    METRICS[metric] = duration
    STAGES.append(metric)


def print_kpi(metric):
    """Log one KPI; 'total' logs the time since init_kpi_metrics()."""
    if metric in STAGES:
        LOG.info(" Time in stage '%s': %s ",
                 metric, get_formated_time(METRICS[metric]))
    elif metric == 'total' and start:
        LOG.info(" Total time: %s", get_formated_time(time.time() - start))


def get_kpi_str(metric):
    """Return the same text print_kpi() would log, as a string."""
    msg = ""
    if metric in STAGES:
        msg += (" Time in stage '{}': {} \n"
                .format(metric, get_formated_time(METRICS[metric])))
    elif metric == 'total' and start:
        msg += (" Total time: {}\n"
                .format(get_formated_time(time.time() - start)))
    return msg


def get_kpi_metrics_str():
    """Return a report of all recorded KPIs plus the total."""
    msg = "===================== Metrics ====================\n"
    for stage in STAGES:
        msg += get_kpi_str(stage)
    msg += get_kpi_str('total')
    msg += "===============================================\n"
    return msg


def print_kpi_metrics():
    """Log a report of all recorded KPIs plus the total."""
    LOG.info("===================== Metrics ====================")
    for stage in STAGES:
        print_kpi(stage)
    print_kpi('total')
    LOG.info("==================================================")
#
# utils/serial.py
#
# SPDX-License-Identifier: Apache-2.0
#
# Helpers to talk to a VM's serial console through a local domain
# socket (Linux) or a TCP port (Windows) using streamexpect.

import re
import socket
from sys import platform, stdout
import time

import streamexpect  # third-party

from utils.install_log import LOG


def connect(hostname, port=10000, prefix=""):
    """Connect to the serial-console socket of a node.

    :param hostname: target node name, e.g. controller-0.
    :param port: TCP port, used only on Windows.
    :param prefix: optional socket-name prefix.
    :return: the connected, non-blocking socket, or None if the
        connection failed.  (The original set sock to None and then
        crashed calling sock.setblocking() on it.)
    """
    if prefix:
        prefix = "{}_".format(prefix)
    socketname = "/tmp/{}{}".format(prefix, hostname)
    if 'controller-0' in hostname:
        socketname += '_serial'
    LOG.info("Connecting to %s at %s", hostname, socketname)
    if platform in ('win32', 'win64'):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                             socket.IPPROTO_TCP)
    else:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        if platform in ('win32', 'win64'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            sock.connect(('localhost', port))
        else:
            sock.connect(socketname)
    except OSError:
        LOG.info("Connection failed")
        sock.close()
        return None
    sock.setblocking(0)
    return sock


def disconnect(sock):
    """Shut down and close a console socket.

    :param sock: the socket returned by connect().
    """
    LOG.info("Disconnecting from socket")
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()


def get_output(stream, cmd, prompts=None, timeout=5, log=True,
               as_lines=True, flush=True):
    """Execute a command over the console and collect its output.

    NOTE(review): marked "not tested" by the original author; will not
    work if the kernel or other processes write to the console while the
    command runs ('dmesg -D' should have been executed first).

    :param stream: console socket/stream supporting recv()/sendall().
    :param cmd: command line to execute.
    :param prompts: regex strings that mark the end of output; defaults
        to common shell/keystone prompts.
    :param timeout: seconds to wait for a prompt.
    :param log: also log each complete output line.
    :param as_lines: return a list of lines instead of the raw text.
    :param flush: discard anything pending in the input buffer first.
    :raises streamexpect.ExpectTimeout: if no prompt appears in time.
    """
    POLL_PERIOD = 0.1
    MAX_READ_BUFFER = 1024
    data = ""
    line_buf = ""
    lines = []
    if not prompts:
        prompts = [':~$ ', ':~# ', ':/home/wrsroot# ',
                   '(keystone_.*)]$ ', '(keystone_.*)]# ']

    # Flush stale input so we only parse this command's output.
    if flush:
        try:
            trash = stream.poll(1)
            if trash:
                try:
                    LOG.info("Buffer has bytes before cmd execution: %s",
                             trash.decode('utf-8'))
                except Exception:
                    pass
        except streamexpect.ExpectTimeout:
            pass

    # Send the command.
    stream.sendall("{}\n".format(cmd).encode('utf-8'))

    patterns = [re.compile(prompt) for prompt in prompts]

    end_time = time.time() + float(timeout)
    prev_timeout = stream.gettimeout()
    stream.settimeout(POLL_PERIOD)
    try:
        while time.time() <= end_time:
            # Reset each iteration: the original kept the previous
            # chunk and re-processed it after every recv timeout.
            incoming = None
            try:
                incoming = stream.recv(MAX_READ_BUFFER)
            except socket.timeout:
                pass
            if incoming:
                # recv() returns bytes; decode before appending to the
                # str accumulator (the original concatenated bytes to
                # str, raising TypeError).
                text = incoming.decode('utf-8', errors='replace')
                data += text
                if log:
                    for ch in text:
                        if ch != '\n':
                            line_buf += ch
                        else:
                            LOG.info(line_buf)
                            lines.append(line_buf)
                            line_buf = ""
                for pattern in patterns:
                    if pattern.search(data):
                        return lines if as_lines else data
        raise streamexpect.ExpectTimeout()
    finally:
        stream.settimeout(prev_timeout)


def expect_bytes(stream, text, timeout=180, fail_ok=False, flush=True):
    """Wait for the given text on the stream.

    :return: 0 when found, -1 on timeout when fail_ok is True.
    :raises streamexpect.ExpectTimeout: on timeout when fail_ok is False.
    """
    time.sleep(1)
    if timeout < 60:
        LOG.info("Expecting text within %s seconds: %s\n", timeout, text)
    else:
        LOG.info("Expecting text within %s minutes: %s\n", timeout/60, text)
    try:
        stream.expect_bytes("{}".format(text).encode('utf-8'),
                            timeout=timeout)
    except streamexpect.ExpectTimeout:
        if fail_ok:
            return -1
        stdout.write('\n')
        LOG.error("Did not find expected text")
        raise
    except Exception as e:
        LOG.info("Connection failed with %s", e)
        raise

    stdout.write('\n')
    LOG.info("Found expected text: %s", text)

    time.sleep(1)
    if flush:
        try:
            incoming = stream.poll(1)  # flush input buffers
            if incoming:
                incoming += b'\n'
                try:
                    LOG.info(">>> expect_bytes: Buffer has bytes!")
                    stdout.write(incoming.decode('utf-8'))
                except Exception:
                    pass
        except streamexpect.ExpectTimeout:
            pass

    return 0


def send_bytes(stream, text, fail_ok=False, expect_prompt=True,
               prompt=None, timeout=180, send=True, flush=True):
    """Send text to the stream, optionally waiting for a prompt after.

    :param send: append a newline when True (i.e. "press enter").
    :param prompt: explicit prompt to wait for; otherwise a shell or
        keystone prompt is expected.
    :return: 0 on success, -1 on timeout when fail_ok is True.
    :raises streamexpect.ExpectTimeout: on timeout when fail_ok is False.
    """
    time.sleep(1)
    if flush:
        try:
            incoming = stream.poll(1)  # flush input buffers
            if incoming:
                incoming += b'\n'
                try:
                    LOG.info(">>> send_bytes: Buffer has bytes!")
                    stdout.write(incoming.decode('utf-8'))
                except Exception:
                    pass
        except streamexpect.ExpectTimeout:
            pass

    LOG.info("Sending text: %s", text)
    try:
        if send:
            stream.sendall("{}\n".format(text).encode('utf-8'))
        else:
            stream.sendall("{}".format(text).encode('utf-8'))
        if expect_prompt:
            time.sleep(1)
            if prompt:
                return expect_bytes(stream, prompt, timeout=timeout,
                                    fail_ok=fail_ok)
            rc = expect_bytes(stream, "~$", timeout=timeout, fail_ok=True)
            if rc != 0:
                # No shell prompt -- nudge the console and expect a
                # keystone prompt instead.
                send_bytes(stream, '\n', expect_prompt=False)
                expect_bytes(stream, 'keystone', timeout=timeout)
            # Original fell off with a bare "return" (None); return 0
            # to match the success contract of the other paths.
            return 0
    except streamexpect.ExpectTimeout:
        if fail_ok:
            return -1
        LOG.error("Failed to send text, logging out.")
        stream.sendall("exit".encode('utf-8'))
        raise
    except Exception as e:
        LOG.info("Connection failed with %s.", e)
        raise

    return 0
#
# utils/sftp.py
#
# SPDX-License-Identifier: Apache-2.0
#
# File-transfer helpers (SFTP and rsync) used to push config files and
# images to the target controller.

import getpass
import os
import time
import subprocess

import paramiko  # third-party

from utils.install_log import LOG


def sftp_send(source, remote_host, remote_port, destination,
              username, password):
    """Send a single file to a remote server over SFTP.

    Retries the SSH connection up to 8 times, 10 s apart, and raises if
    it still cannot connect.  (The original fell through after the last
    retry and crashed with a NameError on sftp_client.)

    :param source: local file path.
    :param destination: remote file path.
    """
    LOG.info("Connecting to server %s with username %s",
             remote_host, username)

    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    sftp_client = None
    for _attempt in range(8):
        try:
            ssh_client.connect(remote_host, port=remote_port,
                               username=username, password=password,
                               look_for_keys=False, allow_agent=False)
            sftp_client = ssh_client.open_sftp()
            break
        except Exception:
            LOG.info("******* try again")
            time.sleep(10)
    if sftp_client is None:
        raise Exception("Could not connect to {}:{} after 8 attempts"
                        .format(remote_host, remote_port))

    LOG.info("Sending file from %s to %s", source, destination)
    sftp_client.put(source, destination)
    LOG.info("Done")
    sftp_client.close()
    ssh_client.close()


def send_dir(source, remote_host, remote_port, destination, username,
             password, follow_links=True, clear_known_hosts=True):
    """rsync a local directory's contents to a remote host.

    Only works from Linux for now (relies on rsync/sshpass/ssh-keygen).

    :param follow_links: pass -L to rsync (follow symlinks).
    :param clear_known_hosts: drop the host's known_hosts entry first so
        a reinstalled target does not trip strict host-key checking.
    :raises Exception: if rsync exits non-zero.
    """
    # Ensure a trailing separator so rsync copies the directory's
    # contents.  The original used "or", which is always true (a string
    # cannot end with both '/' and '\\') and so always appended '/'.
    if not source.endswith('/') and not source.endswith('\\'):
        source = source + '/'
    params = {
        'source': source,
        'remote_host': remote_host,
        'destination': destination,
        'port': remote_port,
        'username': username,
        'password': password,
        'follow_links': "L" if follow_links else "",
    }
    if clear_known_hosts:
        if remote_host == '127.0.0.1':
            keygen_arg = "[127.0.0.1]:{}".format(remote_port)
        else:
            keygen_arg = remote_host
        # The original built this command as a tuple
        # ('ssh-keygen ...', user, host) instead of formatting it, so
        # Popen received a tuple rather than a command string.
        cmd = 'ssh-keygen -f "{}" -R "{}"'.format(
            os.path.expanduser("~/.ssh/known_hosts"), keygen_arg)
        LOG.info("CMD: %s", cmd)
        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        for line in iter(process.stdout.readline, b''):
            LOG.info("%s", line.decode("utf-8").strip())
        process.wait()

    LOG.info("Running rsync of dir: {source} ->"
             "{username}@{remote_host}:{destination}".format(**params))
    cmd = ("rsync -av{follow_links} "
           "--rsh=\"/usr/bin/sshpass -p {password} ssh -p {port} "
           "-o StrictHostKeyChecking=no -l {username}\" "
           "{source}* {username}@{remote_host}:{destination}"
           .format(**params))
    LOG.info("CMD: %s", cmd)

    process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    for line in iter(process.stdout.readline, b''):
        LOG.info("%s", line.decode("utf-8").strip())
    process.wait()
    if process.returncode:
        raise Exception("Error in rsync, return code:{}"
                        .format(process.returncode))


def send_dir_fallback(source, remote_host, destination, username, password):
    """Send directory contents to a remote server, usually controller-0.

    Note: does not send nested directories, only files.  .img files go
    under <destination>/images/, .iso files are skipped, everything else
    goes directly to <destination>.

    :param source: full path to directory, expected to end with '/'
        (e.g. /localhost/loadbuild/jenkins/latest_build/).
    :param remote_host: host to log into, controller-0 by default.
    :param destination: where to store the files, e.g. /home/myuser/.
    """
    LOG.info("Connecting to server %s with username %s",
             remote_host, username)
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.connect(remote_host, username=username, password=password,
                       look_for_keys=False, allow_agent=False)
    sftp_client = ssh_client.open_sftp()
    send_img = False
    for item in os.listdir(source):
        path = source + item
        if not os.path.isfile(path):
            continue
        if item.endswith('.img'):
            remote_path = destination + 'images/' + item
            LOG.info("Sending file from %s to %s", path, remote_path)
            sftp_client.put(path, remote_path)
            send_img = True
        elif item.endswith('.iso'):
            pass
        else:
            remote_path = destination + item
            LOG.info("Sending file from %s to %s", path, remote_path)
            sftp_client.put(path, remote_path)
    LOG.info("Done")
    sftp_client.close()
    ssh_client.close()
    if send_img:
        # Give the controller time to settle after large image uploads.
        time.sleep(10)
-z "$grp_txt" ]; then + echo "$grp_txt" + fi + done + + exit 0 +fi + + +echo "###### Params:" +echo "Group name: $GROUP" +echo "Action: $ACTION" +if [ ! -z "$SNAP_NAME" ]; then + echo "Snapshot name: $SNAP_NAME" +fi + +BASIC_INST_ACTIONS="pause poweroff" +SNAP_ACTIONS="take delete restore" + +get_vms_by_group () { + local group=$1 + vms=$(VBoxManage list -l vms | + awk -v group="/$group" \ + '/^Name:/ { name = $2; } '` + '/^Groups:/ { groups = $2; } '` + '/^UUID:/ { uuid = $2; if (groups == group) print name, uuid; }') + echo "###### VMs in group:" >&2 + echo "$vms" >&2 + echo "$vms" + +} + +if [[ "$SNAP_ACTIONS" = *"$ACTION"* ]]; then + if [ $# -lt 3 ]; then + echo "###### ERROR:" + echo "Action '$ACTION' requires a snapshot name." + fi + vms=$(get_vms_by_group "$GROUP") + echo "#### Executing action on vms" + while read -r vm; do + vm=(${vm}) + echo "Executing '$ACTION' on ${vm[0]}..." + VBoxManage snapshot ${vm[1]} "${ACTION}" "${SNAP_NAME}" + done <<< "$vms" +elif [[ "$BASIC_INST_ACTIONS" = *"$ACTION"* ]]; then + vms=$(get_vms_by_group "$GROUP") + echo "#### Executing action on vms" + while read -r vm; do + vm=(${vm}) + echo "Executing '$ACTION' on '${vm[0]}'..." + VBoxManage controlvm ${vm[1]} "${ACTION}" + done <<< "$vms" + wait +elif [[ "$ACTION" = "resume" ]]; then + echo "resume" + vms=$(get_vms_by_group "$GROUP") + # Getting vm's in saved state + saved_vms="" + while read -r vm; do + vmA=(${vm}) + state=$(vboxmanage showvminfo ${vmA[1]} --machinereadable | + grep "VMState=") + if [[ "$state" = *"saved"* ]]; then + if [ -z "$saved_vms" ]; then + saved_vms="$vm" + else + saved_vms=$(printf '%s\n%s' "$saved_vms" "$vm") + fi + fi + done <<< "$vms" + echo "#### VMs in saved state:" + echo "$saved_vms" + # Powering on each VM + echo "#### Preparing vms for start" + if [ ! -z "$saved_vms" ]; then + while read -r vm; do + vm=(${vm}) + echo "Powering on VM \"${vm[1]}\"." 
+ (VBoxHeadless --start-paused --startvm ${vm[1]} \ + --vrde config >/dev/null 2>&1) & + sleep 1 + while true; do + state=$(vboxmanage showvminfo ${vm[1]} --machinereadable | + grep "VMState=") + if [[ "$state" = *"paused"* ]]; then + break + fi + done + done <<< "$saved_vms" + fi +elif [[ "$ACTION" = "poweron" ]]; then + vms=$(get_vms_by_group "$GROUP") + echo "#### Powering on vms" + while read -r vm; do + vm=(${vm}) + (vboxmanage startvm ${vm[1]} --type headless) & + done <<< "$vms" + wait +elif [[ "$ACTION" = "poweroff" ]]; then + echo "poweroff" +else + echo "###### ERROR:" + echo "ERROR: Action '$ACTION' not supported" +fi +