Move nova configuration initialization

* Move shared volume configuration from stack.sh to stackrc
* Move Nova network and vnc/spice configuration settings from stack.sh into lib/nova
* Rename NET_MAN to NETWORK_MANAGER to match the nova.conf attribute name

Change-Id: I9bd2955def553499aa832eda1f0959afe494206a
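The rename is backward compatible: lib/nova now seeds the new variable from the old one (NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}, see the first lib/nova hunk below), so an existing localrc that still sets NET_MAN keeps working. A minimal localrc fragment after this change (VlanManager is just an illustrative value):

    # New spelling, matching the nova.conf option name:
    NETWORK_MANAGER=VlanManager
    # Legacy spelling, still honored via the NET_MAN fallback:
    #NET_MAN=VlanManager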
lib/nova | 98
@@ -66,6 +66,59 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
 QEMU_CONF=/etc/libvirt/qemu.conf
 
 
+# Nova Network Configuration
+# --------------------------
+
+# Set defaults according to the virt driver
+if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+    PUBLIC_INTERFACE_DEFAULT=eth3
+    GUEST_INTERFACE_DEFAULT=eth1
+    # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
+    FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
+elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
+    NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
+    PUBLIC_INTERFACE_DEFAULT=eth0
+    FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
+    FLAT_NETWORK_BRIDGE_DEFAULT=br100
+    STUB_NETWORK=${STUB_NETWORK:-False}
+else
+    PUBLIC_INTERFACE_DEFAULT=br100
+    GUEST_INTERFACE_DEFAULT=eth0
+    FLAT_NETWORK_BRIDGE_DEFAULT=br100
+fi
+
+NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
+PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
+VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
+EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
+
+# If you are using the FlatDHCP network mode on multiple hosts, set the
+# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
+# have an IP or you risk breaking things.
+#
+# **DHCP Warning**:  If your flat interface device uses DHCP, there will be a
+# hiccup while the network is moved from the flat interface to the flat network
+# bridge.  This will happen when you launch your first instance.  Upon launch
+# you will lose all connectivity to the node, and the VM launch will probably
+# fail.
+#
+# If you are running on a single node and don't need to access the VMs from
+# devices other than that node, you can set ``FLAT_INTERFACE=``
+# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
+FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
+
+# ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
+# allows network operations and routing for a VM to occur on the server that is
+# running the VM - removing a SPOF and bandwidth bottleneck.
+MULTI_HOST=`trueorfalse False $MULTI_HOST`
+
+# Test floating pool and range are used for testing.  They are defined
+# here until the admin APIs can replace nova-manage
+TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
+TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
+
+
 # Entry Points
 # ------------
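Note the colon-less expansion on the FLAT_INTERFACE line: ${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} applies the default only when the variable is unset, so the documented FLAT_INTERFACE= opt-out survives. A quick illustration (eth0 stands in for the default):

    unset FLAT_INTERFACE
    echo "${FLAT_INTERFACE-eth0}"     # unset -> eth0 (default used)
    FLAT_INTERFACE=
    echo "${FLAT_INTERFACE-eth0}"     # set-but-empty -> empty, bridging disabled
    echo "${FLAT_INTERFACE:-eth0}"    # ':-' would clobber the empty opt-out -> eth0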
@@ -439,6 +492,49 @@ function create_nova_conf() {
         # Replace the first '=' with ' ' for iniset syntax
         iniset $NOVA_CONF DEFAULT ${I/=/ }
     done
+
+    # All nova-compute workers need to know the vnc configuration options
+    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
+    if is_service_enabled n-cpu; then
+        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
+        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
+        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
+        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
+        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
+    fi
+    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
+    else
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+    fi
+
+    if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then
+      # Address on which instance vncservers will listen on compute hosts.
+      # For multi-host, this should be the management ip of the compute host.
+      VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
+      iniset $NOVA_CONF DEFAULT vnc_enabled true
+      iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
+      iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+    else
+      iniset $NOVA_CONF DEFAULT vnc_enabled false
+    fi
+
+    if is_service_enabled n-spice; then
+      # Address on which instance spiceservers will listen on compute hosts.
+      # For multi-host, this should be the management ip of the compute host.
+      SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+      SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
+      iniset $NOVA_CONF spice enabled true
+      iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
+      iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
+    else
+      iniset $NOVA_CONF spice enabled false
+    fi
+
+    iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
+    iniset_rpc_backend nova $NOVA_CONF DEFAULT
+    iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
 }
 
 # create_nova_cache_dir() - Part of the init_nova() process
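The "Replace the first '=' with ' '" comment at the top of this hunk relies on bash pattern substitution: ${I/=/ } rewrites only the first = in a key=value pair, and the unquoted expansion then word-splits into the two extra arguments iniset takes. Assuming a hypothetical extra option api_rate_limit=False:

    I=api_rate_limit=False
    echo ${I/=/ }              # -> api_rate_limit False
    # so the loop body expands to:
    # iniset $NOVA_CONF DEFAULT api_rate_limit False

First-match-only replacement matters here: a value that itself contains '=' stays intact.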
@@ -450,7 +546,7 @@ function create_nova_cache_dir() {
 }
 
 function create_nova_conf_nova_network() {
-    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN"
+    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
     iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
     iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
     iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
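With the stock defaults from the first lib/nova hunk (FlatDHCPManager, br100, eth0), create_nova_conf_nova_network would leave roughly this in nova.conf; the values shown are illustrative, not part of the diff:

    [DEFAULT]
    network_manager = nova.network.manager.FlatDHCPManager
    public_interface = br100
    vlan_interface = eth0
    flat_network_bridge = br100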
@@ -53,7 +53,7 @@
 # that must be set in localrc for connectivity across hosts with
 # Quantum.
 #
-# With Quantum networking the NET_MAN variable is ignored.
+# With Quantum networking the NETWORK_MANAGER variable is ignored.
 
 
 # Save trace setting
stack.sh | 105
@@ -278,11 +278,6 @@ SWIFT3_DIR=$DEST/swift3
 # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
 CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
 
-# Name of the LVM volume group to use/create for iscsi volumes
-VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
-VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
-INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
-
 # Generic helper to configure passwords
 function read_password {
     XTRACE=$(set +o | grep xtrace)
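trueorfalse, used above for CINDER_SECURE_DELETE and later for MULTI_HOST, normalizes loose user input (1/yes/true, 0/no/false) to the literal strings True or False, falling back to the supplied default. A sketch of the idea, not necessarily the exact DevStack implementation:

    function trueorfalse {
        local default=$1 testval=$2
        # Empty input: keep the caller's default
        [[ -z "$testval" ]] && { echo "$default"; return; }
        # Substring match against the accepted falsy/truthy spellings
        [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
        [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
        echo "$default"
    }

    MULTI_HOST=$(trueorfalse False "$MULTI_HOST")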
@@ -326,64 +321,6 @@ function read_password {
 }
 
 
-# Nova Network Configuration
-# --------------------------
-
-# FIXME: more documentation about why these are important options.  Also
-# we should make sure we use the same variable names as the option names.
-
-if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-    PUBLIC_INTERFACE_DEFAULT=eth3
-    # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
-    FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
-    GUEST_INTERFACE_DEFAULT=eth1
-elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
-    PUBLIC_INTERFACE_DEFAULT=eth0
-    FLAT_NETWORK_BRIDGE_DEFAULT=br100
-    FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
-    FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False}
-    NET_MAN=${NET_MAN:-FlatManager}
-    STUB_NETWORK=${STUB_NETWORK:-False}
-else
-    PUBLIC_INTERFACE_DEFAULT=br100
-    FLAT_NETWORK_BRIDGE_DEFAULT=br100
-    GUEST_INTERFACE_DEFAULT=eth0
-fi
-
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-NET_MAN=${NET_MAN:-FlatDHCPManager}
-EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
-FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
-VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True}
-
-# Test floating pool and range are used for testing.  They are defined
-# here until the admin APIs can replace nova-manage
-TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
-TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
-
-# ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
-# allows network operations and routing for a VM to occur on the server that is
-# running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=`trueorfalse False $MULTI_HOST`
-
-# If you are using the FlatDHCP network mode on multiple hosts, set the
-# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
-# have an IP or you risk breaking things.
-#
-# **DHCP Warning**:  If your flat interface device uses DHCP, there will be a
-# hiccup while the network is moved from the flat interface to the flat network
-# bridge.  This will happen when you launch your first instance.  Upon launch
-# you will lose all connectivity to the node, and the VM launch will probably
-# fail.
-#
-# If you are running on a single node and don't need to access the VMs from
-# devices other than that node, you can set ``FLAT_INTERFACE=``
-# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
-FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
-
-## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
-
 # Database Configuration
 # ----------------------
 
@@ -980,48 +917,6 @@ if is_service_enabled nova; then
     elif is_service_enabled n-net; then
         create_nova_conf_nova_network
     fi
-    # All nova-compute workers need to know the vnc configuration options
-    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
-    if is_service_enabled n-cpu; then
-        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
-        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
-        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
-        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
-        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
-        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
-    fi
-    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
-    else
-        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
-    fi
-
-    if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then
-      # Address on which instance vncservers will listen on compute hosts.
-      # For multi-host, this should be the management ip of the compute host.
-      VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
-      iniset $NOVA_CONF DEFAULT vnc_enabled true
-      iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
-      iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
-    else
-      iniset $NOVA_CONF DEFAULT vnc_enabled false
-    fi
-
-    if is_service_enabled n-spice; then
-      # Address on which instance spiceservers will listen on compute hosts.
-      # For multi-host, this should be the management ip of the compute host.
-      SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
-      SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
-      iniset $NOVA_CONF spice enabled true
-      iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
-      iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
-    else
-      iniset $NOVA_CONF spice enabled false
-    fi
-
-    iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
-    iniset_rpc_backend nova $NOVA_CONF DEFAULT
-    iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
 
 
     # XenServer
stackrc | 5
@@ -196,5 +196,10 @@ esac
 # 5Gb default volume backing file size
 VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
 
+# Name of the LVM volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
+
 PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
 PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"}
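Because every entry moved into stackrc keeps the ${VAR:-default} form, a localrc can still override the volume settings; the move only changes where the defaults live. An illustrative localrc override (the values are hypothetical):

    VOLUME_GROUP=stack-volumes-ssd
    VOLUME_NAME_PREFIX=vol-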
@@ -6,7 +6,7 @@
 SHELL_AFTER_RUN=no
 
 # Variables common amongst all hosts in the cluster
-COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
+COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NETWORK_MANAGER=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
 
 # Helper to launch containers
 function run_bm {
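COMMON_VARS is a flat string of VAR=value pairs, so it can be prefixed onto a per-host stack.sh invocation to give every cluster member the same settings. A hypothetical use; the body of run_bm is not shown in this diff:

    # Assignments prepended to a command set its environment for that run only
    ssh $COMPUTE_HOST "cd devstack && $COMMON_VARS ./stack.sh"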