[OVN] Merge networking-ovn vagrant into neutron

Move networking-ovn/vagrant into neutron/tools/ovn_vagrant
Also add two sample local.conf files, one for a DB-only node and
one for a VTEP node.

Co-Authored-By: zhangyanxian <zhangyanxianmail@163.com>
Co-Authored-By: chen-li <shchenli@cn.ibm.com>
Co-Authored-By: Russell Bryant <rbryant@redhat.com>
Co-Authored-By: Kyle Mestery <mestery@mestery.com>
Co-Authored-By: Miguel Angel Ajo <majopela@redhat.com>
Co-Authored-By: Richard Theis <rtheis@us.ibm.com>
Co-Authored-By: JUNJIE NAN <nanjj@cn.ibm.com>
Co-Authored-By: Flavio Fernandes <flavio@flaviof.com>
Co-Authored-By: John Kasperski <jckasper@us.ibm.com>
Co-Authored-By: Matthew Kassawara <mkassawara@gmail.com>
Co-Authored-By: venkatamahesh <venkatamaheshkotha@gmail.com>
Co-Authored-By: Tong Li <litong01@us.ibm.com>
Co-Authored-By: venkata anil <anilvenkata@redhat.com>
Co-Authored-By: Vu Cong Tuan <tuanvc@vn.fujitsu.com>
Co-Authored-By: RYAN D. MOATS <rmoats@us.ibm.com>

Change-Id: I12966d5548a60b46edd5c84ee0035eb11671fd8c
Partially-Implements: blueprint neutron-ovn-merge
Brian Haley 2020-02-03 19:10:24 -05:00
parent e9acdb06fa
commit 88a6c92dc7
20 changed files with 743 additions and 14 deletions

@@ -0,0 +1,39 @@
#
# Sample DevStack local.conf.
#
# This sample file is intended to be used for running ovn-northd and the
# OVN DBs on a separate node.
#
# For this configuration to work, you *must* set the SERVICE_HOST option to the
# IP address of the main DevStack host.
#
[[local|localrc]]
DATABASE_PASSWORD=password
RABBIT_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
ADMIN_PASSWORD=password
# The DevStack plugin defaults to using the ovn branch from the official ovs
# repo. You can optionally use a different one. For example, you may want to
# use the latest patches in blp's ovn branch:
#OVN_REPO=https://github.com/blp/ovs-reviews.git
#OVN_BRANCH=ovn
enable_plugin neutron https://git.openstack.org/openstack/neutron
disable_all_services
enable_service ovn-northd
# A UUID to uniquely identify this system. If one is not specified, a random
# one will be generated and saved in the file 'ovn-uuid' for re-use in future
# DevStack runs.
#OVN_UUID=
# Whether or not to build custom openvswitch kernel modules from the ovs git
# tree. This is enabled by default. This is required unless your distro kernel
# includes ovs+conntrack support. This support was first released in Linux 4.3,
# and will likely be backported by some distros.
#OVN_BUILD_MODULES=False
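A usage sketch for this sample (it mirrors what setup-db.sh later in this change does; the copy path is taken from that script and may differ in your tree)::

    # On the DB-only node, from the vagrant user's home directory:
    cp neutron/devstack/ovn-db-local.conf.sample devstack/local.conf
    # Edit devstack/local.conf and set SERVICE_HOST to the IP of the main DevStack host.
    devstack/stack.sh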

@@ -0,0 +1,39 @@
#
# Sample DevStack local.conf.
#
# This sample file is intended for running the HW VTEP emulator on a
# separate node.
#
# For this configuration to work, you *must* set the SERVICE_HOST option to the
# IP address of the main DevStack host.
#
[[local|localrc]]
DATABASE_PASSWORD=password
RABBIT_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
ADMIN_PASSWORD=password
# The DevStack plugin defaults to using the ovn branch from the official ovs
# repo. You can optionally use a different one. For example, you may want to
# use the latest patches in blp's ovn branch:
#OVN_REPO=https://github.com/blp/ovs-reviews.git
#OVN_BRANCH=ovn
enable_plugin neutron https://git.openstack.org/openstack/neutron
disable_all_services
enable_service ovn-controller-vtep
# A UUID to uniquely identify this system. If one is not specified, a random
# one will be generated and saved in the file 'ovn-uuid' for re-use in future
# DevStack runs.
#OVN_UUID=
# Whether or not to build custom openvswitch kernel modules from the ovs git
# tree. This is enabled by default. This is required unless your distro kernel
# includes ovs+conntrack support. This support was first released in Linux 4.3,
# and will likely be backported by some distros.
#OVN_BUILD_MODULES=False

@@ -9,7 +9,7 @@ to deploy OpenStack with Open Virtual Network (OVN) integration for
the Networking service in production with sufficient expectations
of scale and performance. For evaluation purposes, you can deploy this
environment using the :doc:`Installation Guide </install/ovn/index>` or
`Vagrant <https://github.com/openstack/neutron/tree/master/tools/ovn_vagrant>`_.
`Vagrant <https://github.com/openstack/neutron/tree/master/vagrant/ovn>`_.
Any scaling or performance evaluations should use bare metal instead of
virtual machines.

@@ -30,7 +30,7 @@ Neutron logical network setup
-----------------------------
::
vagrant@precise64:~/devstack$ openstack network list
vagrant@bionic64:~/devstack$ openstack network list
+--------------------------------------+---------+----------------------------------------------------------------------------+
| ID | Name | Subnets |
+--------------------------------------+---------+----------------------------------------------------------------------------+
@@ -38,7 +38,7 @@ Neutron logical network setup
| 713bae25-8276-4e0a-a453-e59a1d65425a | private | 6fa3bab9-103e-45d5-872c-91f21b52ceda, c5c9f5c2-145d-46d2-a513-cf675530eaed |
+--------------------------------------+---------+----------------------------------------------------------------------------+
vagrant@precise64:~/devstack$ openstack subnet list
vagrant@bionic64:~/devstack$ openstack subnet list
+--------------------------------------+---------------------+--------------------------------------+--------------------+
| ID | Name | Network | Subnet |
+--------------------------------------+---------------------+--------------------------------------+--------------------+
@@ -48,7 +48,7 @@ Neutron logical network setup
| c5c9f5c2-145d-46d2-a513-cf675530eaed | private-subnet | 713bae25-8276-4e0a-a453-e59a1d65425a | 10.0.0.0/24 |
+--------------------------------------+---------------------+--------------------------------------+--------------------+
vagrant@precise64:~/devstack$ openstack port list
vagrant@bionic64:~/devstack$ openstack port list
+--------------------------------------+------+-------------------+----------------------------------------------------------------------------------------------------+--------+
| ID | Name | MAC Address | Fixed IP Addresses | Status |
+--------------------------------------+------+-------------------+----------------------------------------------------------------------------------------------------+--------+
@@ -61,7 +61,7 @@ Neutron logical network setup
+--------------------------------------+------+-------------------+----------------------------------------------------------------------------------------------------+--------+
vagrant@precise64:~/devstack$ openstack subnet show c5c9f5c2-145d-46d2-a513-cf675530eaed
vagrant@bionic64:~/devstack$ openstack subnet show c5c9f5c2-145d-46d2-a513-cf675530eaed
+-------------------+--------------------------------------+
| Field | Value |
+-------------------+--------------------------------------+
@@ -93,13 +93,13 @@ Neutron logical router setup
::
vagrant@precise64:~/devstack$ openstack router list
vagrant@bionic64:~/devstack$ openstack router list
+--------------------------------------+---------+--------+-------+-------------+-------+----------------------------------+
| ID | Name | Status | State | Distributed | HA | Project |
+--------------------------------------+---------+--------+-------+-------------+-------+----------------------------------+
| 82fa9a47-246e-4da8-a864-53ea8daaed42 | router1 | ACTIVE | UP | False | False | 35e3820f7490493ca9e3a5e685393298 |
+--------------------------------------+---------+--------+-------+-------------+-------+----------------------------------+
vagrant@precise64:~/devstack$ openstack router show router1
vagrant@bionic64:~/devstack$ openstack router show router1
+-------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+-------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
@@ -121,7 +121,7 @@ Neutron logical router setup
| status | ACTIVE |
| updated_at | 2016-11-08T21:55:51Z |
+-------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
vagrant@precise64:~/devstack$ openstack port list --router router1
vagrant@bionic64:~/devstack$ openstack port list --router router1
+--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+--------+
| ID | Name | MAC Address | Fixed IP Addresses | Status |
+--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+--------+
@@ -143,7 +143,7 @@ Neutron Routers are realized in OpenVSwitch
"router1" in the Neutron logical network is realized through a port ("qr-0ba8700e-da") in OpenVSwitch - attached to "br-int"::
vagrant@precise64:~/devstack$ sudo ovs-vsctl show
vagrant@bionic64:~/devstack$ sudo ovs-vsctl show
b9b27fc3-5057-47e7-ba64-0b6afe70a398
Bridge br-int
Port "qr-0ba8700e-da"
@@ -182,7 +182,7 @@ Neutron Routers are realized in OpenVSwitch
ovs_version: "1.4.0+build0"
vagrant@precise64:~/devstack$ brctl show
vagrant@bionic64:~/devstack$ brctl show
bridge name bridge id STP enabled interfaces
br-eth1 0000.e2e7fc5ccb4d no
br-ex 0000.82ee46beaf4d no phy-br-ex
@@ -215,13 +215,13 @@ namespace. The namespace will have the name "qrouter-<UUID of the router>.
For example::
vagrant@precise64:~$ openstack router list
vagrant@bionic64:~$ openstack router list
+--------------------------------------+---------+-------------------------------------------------------------------------+
| ID | Name | Status | State | Distributed | HA | Project |
+--------------------------------------+---------+-------------------------------------------------------------------------+
| ad948c6e-afb6-422a-9a7b-0fc44cbb3910 | router1 | Active | UP | True | False | 35e3820f7490493ca9e3a5e685393298 |
+--------------------------------------+---------+-------------------------------------------------------------------------+
vagrant@precise64:~/devstack$ sudo ip netns exec qrouter-ad948c6e-afb6-422a-9a7b-0fc44cbb3910 ip addr list
vagrant@bionic64:~/devstack$ sudo ip netns exec qrouter-ad948c6e-afb6-422a-9a7b-0fc44cbb3910 ip addr list
18: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo

@@ -49,10 +49,10 @@ Deployment
:doc:`/contributor/ovn_vagrant/prerequisites`
#. Clone the ``neutron`` repository locally and change to the
``neutron/tools/ovn_vagrant/sparse`` directory::
``neutron/vagrant/ovn/sparse`` directory::
$ git clone https://opendev.org/openstack/neutron.git
$ cd neutron/tools/ovn_vagrant/sparse
$ cd neutron/vagrant/ovn/sparse
#. If necessary, adjust any configuration in the ``instances.yml`` file.

vagrant/ovn/README.rst
@@ -0,0 +1,6 @@
=========================================
Automatic deployment of OVN using Vagrant
=========================================
Please reference the files in /doc/source/contributor/ovn_vagrant/ for more
information about this.

@@ -0,0 +1,6 @@
---
ubuntu:
virtualbox: bento/ubuntu-18.04
libvirt: elastic/ubuntu-18.04-x86_64
parallels: bento/ubuntu-18.04
openstack: bionic64

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAv2EyHk382N9LGMPAGbAG9rea6qcO+I+qj7OscU1k8GxnYO0B
hHYPMzfT1RgeBDelNyM22SNiySr4iTQDBQxgunrUCdTaNu5dmzYT68gieqnH+CRR
jpLxXecH2hcvKyFx5qmhMt4zE3QWCDv2JJiB5CGoV7sGCy1aTYbFJBeKwMUMpBwP
67rpBdVcpNjeSkw8FKDvPVx1p1O0YgeI9JoIL0qka6FFgiii/wf5jgr0w/JW15VI
2pYwpHhdnBt3M3BV2HK5cA6nwFhUfAG0HLP5lUGW9/Hk5ng/Wl7cz8nLAdgXf4Uc
IUffO5SH+9/H5VhTMDpaRPgxWuOw1/UGLgf57wIDAQABAoIBAD/+5X6Cv6lZycfn
NWahmUKJFRGgeX4etH9HKsPciINpDIy51EcSH3UWFwzr+qWYYfP1H5MupQr2BpQC
w3u9rt7M0fjTp4C05rJPPAwdKYJxIcBVjLwrYPDwn4yLMievEGJ8mL3k1ZmMuQ1Z
165XHSBHLP7hOF0mdkr0ZRnzkV9yMPjZAI6xnkt/q6EvO34wSZu3/qsmptipHgqB
QQAjPIvJwr7DMoLVpBjLlfGihUB5NAVC0RU+7SIiTAUg0atUzucp+sQMnWlWKVvM
3+nHGC8gR4fUy30LDgxd4eqFyG8EYpTzpN/0bgM3kdwiQTkR/lGvhwmok/o6Nz0n
67ve12ECgYEA41lug+TitPrq9VaLacTBpDOafsmIY30sylBJClcbkQ94NEQyNASg
TsXxRtvYvKuHy0i2xZwagqEyReaTfsScmyFOk/SRFqjgmb3eWYtgF04MtAAmLy9G
5UmPLEm6lLuQGCI7CqLAv3PFCR7W7dX5VYwkteDejZ0NlLeNqKJjIvECgYEA139W
ocUBbWu4ea68JB/qOGrxCMQKn6K3l9kA7tuu+3a0le0G7LF6dr+X1wvboCF/w8CZ
ZqKm35yZoAzyFmfn8oGtJdgbz4Sl3/vZReg86Ca//m4LMe1FkkimT3UW+BKprtEJ
5GiKKWYElknMthbDTpL5EouciPhG0bYKuIMBKt8CgYAL+LqcEWJqu0fCEYOX1zeH
KPx6rqwS6RWBtcaS19FoyxK+VdT67j9uxneVDqCUFsg4ySRutXCj7k8SZTjhFQNW
G+PiYJ9/PPdOwTPDLVarA34hwFxCYc/u5Pe4Ek3T5SiKTMslHTrfGf6HI2uX7IuL
mKyaMzQk6t87NIsuFRb5UQKBgQCzciEEslUe9b127k9S0ZSriDnQb9bc2ZWCB7zk
KeELGu0Dj43dmWh968sX0pL/RAXtTrsuoTDOMcwnX8BTchDOerdhNRTrd+zcmA50
TRAyzNnBl4cQ+yCc0IxUzA7lYj0UCpPvNDIgiQg20Zt64XefPXnUvJcL45qtVKaW
wNg/BwKBgFyhjxftMwAJJF2Hcq5s8QvNhznBgLtne7jnQkHU4qcJx6tcR1hy0Jqe
8/zkr5+41EaFU2jjGn8cnUrlS/Vc/HZg3rmHYycX5wg9hrg1j4hokSHjsGL6Y7yn
8oXIWJSqpxuMjfRh1Tb81Fg05emrMjTy6aLuGS0siUlTPzflD0RI
-----END RSA PRIVATE KEY-----

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/YTIeTfzY30sYw8AZsAb2t5rqpw74j6qPs6xxTWTwbGdg7QGEdg8zN9PVGB4EN6U3IzbZI2LJKviJNAMFDGC6etQJ1No27l2bNhPryCJ6qcf4JFGOkvFd5wfaFy8rIXHmqaEy3jMTdBYIO/YkmIHkIahXuwYLLVpNhsUkF4rAxQykHA/ruukF1Vyk2N5KTDwUoO89XHWnU7RiB4j0mggvSqRroUWCKKL/B/mOCvTD8lbXlUjaljCkeF2cG3czcFXYcrlwDqfAWFR8AbQcs/mVQZb38eTmeD9aXtzPycsB2Bd/hRwhR987lIf738flWFMwOlpE+DFa47DX9QYuB/nv vagrant@ovn

@@ -0,0 +1,19 @@
#!/bin/bash
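# Moves the provider-network address off the VM's provider NIC (enp0s9 or
# eth2) and onto the br-provider OVS bridge, so that the bridge owns it.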
function provider_setup {
# Save the existing address from the provider interface (enp0s9 or eth2) and add it to br-provider
if ip a | grep enp0; then
PROV_IF=enp0s9
else
PROV_IF=eth2
fi
PROVADDR=$(ip -4 addr show $PROV_IF | grep -oP "(?<=inet ).*(?= brd)")
if [ -n "$PROVADDR" ]; then
sudo ip addr flush dev $PROV_IF
sudo ip addr add $PROVADDR dev br-provider
sudo ip link set br-provider up
sudo ovs-vsctl --may-exist add-port br-provider $PROV_IF
fi
}

@@ -0,0 +1,42 @@
def provider_box(provider)
distro = ENV.fetch('DISTRO', 'ubuntu')
boxes = YAML.load_file('../provisioning/boxes.yml')[distro]
# we can always override the box via the VAGRANT_OVN_VM_BOX
# environment variable
return ENV.fetch('VAGRANT_OVN_VM_BOX', boxes[provider])
end
def configure_providers(vm, config)
vm.provider 'virtualbox' do |vb, cfg|
cfg.vm.box = provider_box('virtualbox')
vb.memory = config['memory']
vb.cpus = config['cpus']
vb.customize [
'modifyvm', :id,
'--nicpromisc3', "allow-all"
]
vb.customize [
"guestproperty", "set", :id,
"/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold", 10000
]
end
vm.provider 'parallels' do |vb, cfg|
cfg.vm.box = provider_box('parallels')
vb.memory = config['memory']
vb.cpus = config['cpus']
vb.customize ['set', :id, '--nested-virt', 'on']
end
vm.provider 'libvirt' do |vb, cfg|
cfg.vm.box = provider_box('libvirt')
vb.memory = config['memory']
vb.cpus = config['cpus']
vb.nested = true
vb.graphics_type = 'spice'
vb.video_type = 'qxl'
vb.suspend_mode = 'managedsave'
end
end

@@ -0,0 +1,104 @@
#!/bin/sh
# Script Arguments:
# $1 - MTU
# $2 - ovn-db IP address
# $3 - ovn-db short name
# $4 - ovn-controller IP address
# $5 - ovn-controller short name
# $6 - ovn-compute1 IP address
# $7 - ovn-compute1 short name
# $8 - ovn-compute2 IP address
# $9 - ovn-compute2 short name
# $10 - ovn-vtep IP address
# $11 - ovn-vtep short name
MTU=$1
OVN_DB_IP=$2
OVN_DB_NAME=$3
OVN_CONTROLLER_IP=$4
OVN_CONTROLLER_NAME=$5
OVN_COMPUTE1_IP=$6
OVN_COMPUTE1_NAME=$7
OVN_COMPUTE2_IP=$8
OVN_COMPUTE2_NAME=$9
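# Positional parameters beyond $9 must be referenced with braces.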
OVN_VTEP_IP=${10}
OVN_VTEP_NAME=${11}
BASE_PACKAGES="git bridge-utils ebtables python-pip python-dev build-essential ntp"
DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update
DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy $BASE_PACKAGES
echo export LC_ALL=en_US.UTF-8 >> ~/.bash_profile
echo export LANG=en_US.UTF-8 >> ~/.bash_profile
# FIXME(mestery): Remove once Vagrant boxes allow apt-get to work again
sudo rm -rf /var/lib/apt/lists/*
sudo apt-get install -y git
# FIXME(mestery): By default, Ubuntu ships with /bin/sh pointing to
# the dash shell.
# ..
# ..
# The dots above represent a pause as you pick yourself up off the
# floor. This means the latest version of "install_docker.sh" to load
# docker fails because dash can't interpret some of its bash-specific
# things. It's a bug in install_docker.sh that it relies on those and
# uses a shebang of /bin/sh, but that doesn't help us if we want to run
# docker and specifically Kuryr. So, this works around that.
sudo update-alternatives --install /bin/sh sh /bin/bash 100
if [ ! -d "devstack" ]; then
git clone https://git.openstack.org/openstack-dev/devstack.git
fi
# If available, use repositories on host to facilitate testing local changes.
# Vagrant requires that shared folders exist on the host, so additionally
# check for the ".git" directory in case the parent exists but lacks
# repository contents.
if [ ! -d "neutron/.git" ]; then
git clone https://git.openstack.org/openstack/neutron.git
fi
# Use neutron in vagrant home directory when stacking.
sudo mkdir /opt/stack
sudo chown vagrant:vagrant /opt/stack
ln -s ~/neutron /opt/stack/neutron
# We need swap space to do any sort of scale testing with the Vagrant config.
# Without this, we quickly run out of RAM and the kernel starts whacking things.
sudo rm -f /swapfile1
sudo dd if=/dev/zero of=/swapfile1 bs=1024 count=2097152
sudo chown root:root /swapfile1
sudo chmod 0600 /swapfile1
sudo mkswap /swapfile1
sudo swapon /swapfile1
# Configure MTU on VM interfaces. Also requires manually configuring the same MTU on
# the equivalent 'vboxnet' interfaces on the host.
if ip a | grep enp0; then
sudo ip link set dev enp0s8 mtu $MTU
sudo ip link set dev enp0s9 mtu $MTU
else
sudo ip link set dev eth1 mtu $MTU
sudo ip link set dev eth2 mtu $MTU
fi
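# Host-side counterpart (run on the host, not inside the VM); the vboxnet
# interface name and MTU value below are only examples:
#   sudo ip link set dev vboxnet1 mtu 1500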
# Migration setup
sudo sh -c "echo \"$OVN_DB_IP $OVN_DB_NAME\" >> /etc/hosts"
sudo sh -c "echo \"$OVN_CONTROLLER_IP $OVN_CONTROLLER_NAME\" >> /etc/hosts"
sudo sh -c "echo \"$OVN_COMPUTE1_IP $OVN_COMPUTE1_NAME\" >> /etc/hosts"
sudo sh -c "echo \"$OVN_COMPUTE2_IP $OVN_COMPUTE2_NAME\" >> /etc/hosts"
sudo sh -c "echo \"$OVN_VTEP_IP $OVN_VTEP_NAME\" >> /etc/hosts"
# Non-interactive SSH setup
cp neutron/vagrant/ovn/provisioning/id_rsa ~/.ssh/id_rsa
cat neutron/vagrant/ovn/provisioning/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/id_rsa
echo "Host *" >> ~/.ssh/config
echo " StrictHostKeyChecking no" >> ~/.ssh/config
chmod 600 ~/.ssh/config
sudo mkdir /root/.ssh
chmod 700 /root/.ssh
sudo cp ~vagrant/.ssh/id_rsa /root/.ssh
sudo cp ~vagrant/.ssh/authorized_keys /root/.ssh
sudo cp ~vagrant/.ssh/config /root/.ssh/config

@@ -0,0 +1,103 @@
#!/usr/bin/env bash
# Script Arguments:
# $1 - ovn-controller IP address
# $2 - ovn-db IP address
OVN_CONTROLLER_IP=$1
OVN_DB_IP=$2
cp neutron/devstack/ovn-compute-local.conf.sample devstack/local.conf
sed -i -e 's/<IP address of host running everything else>/'$OVN_CONTROLLER_IP'/g' devstack/local.conf
sudo umount /opt/stack/data/nova/instances
# Get the IP address
if ip a | grep enp0 ; then
ipaddress=$(ip -4 addr show enp0s8 | grep -oP "(?<=inet ).*(?=/)")
else
ipaddress=$(ip -4 addr show eth1 | grep -oP "(?<=inet ).*(?=/)")
fi
# Fixup HOST_IP with the local IP address
sed -i -e 's/<IP address of current host>/'$ipaddress'/g' devstack/local.conf
# Adjust some things in local.conf
cat << DEVSTACKEOF >> devstack/local.conf
# Set this to the address of the main DevStack host running the rest of the
# OpenStack services.
Q_HOST=$1
HOSTNAME=$(hostname)
OVN_SB_REMOTE=tcp:$OVN_DB_IP:6642
OVN_NB_REMOTE=tcp:$OVN_DB_IP:6641
# Enable logging to files.
LOGFILE=/opt/stack/log/stack.sh.log
# Use provider network for public.
Q_USE_PROVIDERNET_FOR_PUBLIC=True
OVS_PHYSICAL_BRIDGE=br-provider
PHYSICAL_NETWORK=provider
# Until OVN supports NAT, the private network IP address range
# must not conflict with IP address ranges on the host. Change
# as necessary for your environment.
NETWORK_GATEWAY=172.16.1.1
FIXED_RANGE=172.16.1.0/24
ENABLE_CHASSIS_AS_GW=False
DEVSTACKEOF
# Add unique post-config for DevStack here using a separate 'cat' with
# single quotes around EOF to prevent interpretation of variables such
# as $Q_DHCP_CONF_FILE.
cat << 'DEVSTACKEOF' >> devstack/local.conf
# Set the availability zone name (default is nova) for the DHCP service.
[[post-config|$Q_DHCP_CONF_FILE]]
[AGENT]
availability_zone = nova
DEVSTACKEOF
devstack/stack.sh
# Build the provider network in OVN. You can enable instances to access
# external networks such as the Internet by using the IP address of the host
# vboxnet interface for the provider network (typically vboxnet1) as the
# gateway for the subnet on the neutron provider network. Also requires
# enabling IP forwarding and configuring SNAT on the host. See the README for
# more information.
source /vagrant/provisioning/provider-setup.sh
provider_setup
# Add host route for the private network, at least until the native L3 agent
# supports NAT.
# FIXME(mkassawara): Add support for IPv6.
source devstack/openrc admin admin
ROUTER_GATEWAY=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\"`
sudo ip route add $FIXED_RANGE via $ROUTER_GATEWAY
# NFS Setup
sudo apt-get update
sudo apt-get install -y nfs-common
sudo mkdir -p /opt/stack/data/nova/instances
sudo chmod o+x /opt/stack/data/nova/instances
sudo chown vagrant:vagrant /opt/stack/data/nova/instances
sudo sh -c "echo \"$OVN_CONTROLLER_IP:/opt/stack/data/nova/instances /opt/stack/data/nova/instances nfs defaults 0 0\" >> /etc/fstab"
sudo mount /opt/stack/data/nova/instances
sudo chown vagrant:vagrant /opt/stack/data/nova/instances
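# The NFS mount above gives this compute node the shared instance store that
# live migration between compute nodes relies on; a quick sanity check:
#   mount | grep /opt/stack/data/nova/instances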
sudo sh -c "echo \"listen_tls = 0\" >> /etc/libvirt/libvirtd.conf"
sudo sh -c "echo \"listen_tcp = 1\" >> /etc/libvirt/libvirtd.conf"
sudo sh -c "echo -n \"auth_tcp =\" >> /etc/libvirt/libvirtd.conf"
sudo sh -c 'echo " \"none\"" >> /etc/libvirt/libvirtd.conf'
sudo sh -c "sed -i 's/env libvirtd_opts\=\"\-d\"/env libvirtd_opts\=\"-d -l\"/g' /etc/init/libvirt-bin.conf"
sudo sh -c "sed -i 's/libvirtd_opts\=\"\-d\"/libvirtd_opts\=\"\-d \-l\"/g' /etc/default/libvirt-bin"
sudo /etc/init.d/libvirt-bin restart
# Set the OVN_*_DB variables to enable OVN commands using a remote database.
echo -e "\n# Enable OVN commands using a remote database.
export OVN_NB_DB=$OVN_NB_REMOTE
export OVN_SB_DB=$OVN_SB_REMOTE" >> ~/.bash_profile

@@ -0,0 +1,130 @@
#!/usr/bin/env bash
# Script Arguments:
# $1 - ovn-db IP address
# $2 - provider network starting IP address
# $3 - provider network ending IP address
# $4 - provider network gateway
# $5 - provider network network
# $6 - ovn vm subnet
ovnip=$1
start_ip=$2
end_ip=$3
gateway=$4
network=$5
ovn_vm_subnet=$6
# Get the IP address
if ip a | grep enp0 ; then
ipaddress=$(ip -4 addr show enp0s8 | grep -oP "(?<=inet ).*(?=/)")
else
ipaddress=$(ip -4 addr show eth1 | grep -oP "(?<=inet ).*(?=/)")
fi
# Adjust some things in local.conf
cat << DEVSTACKEOF >> devstack/local.conf.vagrant
# Good to set these
HOST_IP=$ipaddress
HOSTNAME=$(hostname)
SERVICE_HOST_NAME=${HOST_NAME}
SERVICE_HOST=$ipaddress
OVN_SB_REMOTE=tcp:$ovnip:6642
OVN_NB_REMOTE=tcp:$ovnip:6641
# Enable logging to files.
LOGFILE=/opt/stack/log/stack.sh.log
# Disable the ovn-northd service on the controller node because the
# architecture includes a separate OVN database server.
disable_service ovn-northd
# Disable the ovn-controller service because the architecture lacks services
# on the controller node that depend on it.
disable_service ovn-controller
# Disable the ovn metadata agent.
disable_service neutron-ovn-metadata-agent
# Disable the nova compute service on the controller node because the
# architecture only deploys it on separate compute nodes.
disable_service n-cpu
# Disable cinder services and tempest to reduce deployment time.
disable_service c-api c-sch c-vol tempest
# Until OVN supports NAT, the private network IP address range
# must not conflict with IP address ranges on the host. Change
# as necessary for your environment.
NETWORK_GATEWAY=172.16.1.1
FIXED_RANGE=172.16.1.0/24
# Use provider network for public.
Q_USE_PROVIDERNET_FOR_PUBLIC=True
OVS_PHYSICAL_BRIDGE=br-provider
PHYSICAL_NETWORK=provider
PUBLIC_NETWORK_NAME=provider
PUBLIC_NETWORK_GATEWAY="$gateway"
PUBLIC_PHYSICAL_NETWORK=provider
PUBLIC_SUBNET_NAME=provider-v4
IPV6_PUBLIC_SUBNET_NAME=provider-v6
Q_FLOATING_ALLOCATION_POOL="start=$start_ip,end=$end_ip"
FLOATING_RANGE="$network"
# If the admin wants to enable this chassis to host gateway routers for
# external connectivity, then set ENABLE_CHASSIS_AS_GW to True.
# Then devstack will set ovn-cms-options with enable-chassis-as-gw
# in Open_vSwitch table's external_ids column
ENABLE_CHASSIS_AS_GW=True
DEVSTACKEOF
# Add unique post-config for DevStack here using a separate 'cat' with
# single quotes around EOF to prevent interpretation of variables such
# as $NEUTRON_CONF.
cat << 'DEVSTACKEOF' >> devstack/local.conf.vagrant
# Enable two DHCP agents per neutron subnet with support for availability
# zones. Requires two or more compute nodes.
[[post-config|/$NEUTRON_CONF]]
[DEFAULT]
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler
dhcp_load_type = networks
dhcp_agents_per_network = 2
# Configure the Compute service (nova) metadata API to use the X-Forwarded-For
# header sent by the Networking service metadata proxies on the compute nodes.
[[post-config|$NOVA_CONF]]
[DEFAULT]
use_forwarded_for = True
DEVSTACKEOF
sed '/#EXTRA_CONFIG/ r devstack/local.conf.vagrant' \
neutron/devstack/ovn-local.conf.sample > devstack/local.conf
devstack/stack.sh
# Make the provider network shared and enable DHCP for its v4 subnet.
source devstack/openrc admin admin
neutron net-update --shared $PUBLIC_NETWORK_NAME
neutron subnet-update --enable_dhcp=True $PUBLIC_SUBNET_NAME
# NFS server setup
sudo apt-get update
sudo apt-get install -y nfs-kernel-server nfs-common
sudo mkdir -p /opt/stack/data/nova/instances
sudo touch /etc/exports
sudo sh -c "echo \"/opt/stack/data/nova/instances $ovn_vm_subnet(rw,sync,fsid=0,no_root_squash)\" >> /etc/exports"
sudo service nfs-kernel-server restart
sudo service nfs-idmapd restart
# Set the OVN_*_DB variables to enable OVN commands using a remote database.
echo -e "\n# Enable OVN commands using a remote database.
export OVN_NB_DB=$OVN_NB_REMOTE
export OVN_SB_DB=$OVN_SB_REMOTE" >> ~/.bash_profile
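# Once these exports are picked up (e.g. on the next login shell), the OVN CLI
# utilities can talk to the remote databases directly, for example:
#   ovn-nbctl show   # uses OVN_NB_DB
#   ovn-sbctl show   # uses OVN_SB_DB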

@@ -0,0 +1,29 @@
#!/usr/bin/env bash
CONTROLLER_IP=$1
cp neutron/devstack/ovn-db-local.conf.sample devstack/local.conf
if [ "$CONTROLLER_IP" != "" ]; then
sed -i -e 's/<IP address of host running everything else>/'$CONTROLLER_IP'/g' devstack/local.conf
fi
# Get the IP address
if ip a | grep enp0 ; then
ipaddress=$(ip -4 addr show enp0s8 | grep -oP "(?<=inet ).*(?=/)")
else
ipaddress=$(ip -4 addr show eth1 | grep -oP "(?<=inet ).*(?=/)")
fi
# Adjust some things in local.conf
cat << DEVSTACKEOF >> devstack/local.conf
# Set this to the address of the main DevStack host running the rest of the
# OpenStack services.
Q_HOST=$CONTROLLER_IP
HOST_IP=$ipaddress
HOSTNAME=$(hostname)
# Enable logging to files.
LOGFILE=/opt/stack/log/stack.sh.log
DEVSTACKEOF
devstack/stack.sh

@@ -0,0 +1,32 @@
#!/usr/bin/env bash
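# Script Arguments (inferred from the Vagrantfile invocation in this change):
# $1 - ovn-controller IP address
# $2 - ovn-db IP address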
OVN_DB_IP=$2
cp neutron/devstack/ovn-vtep-local.conf.sample devstack/local.conf
if [ "$1" != "" ]; then
sed -i -e 's/<IP address of host running everything else>/'$1'/g' devstack/local.conf
fi
# Get the IP address
if ip a | grep enp0 ; then
ipaddress=$(ip -4 addr show enp0s8 | grep -oP "(?<=inet ).*(?=/)")
else
ipaddress=$(ip -4 addr show eth1 | grep -oP "(?<=inet ).*(?=/)")
fi
# Adjust some things in local.conf
cat << DEVSTACKEOF >> devstack/local.conf
# Set this to the address of the main DevStack host running the rest of the
# OpenStack services.
Q_HOST=$1
HOST_IP=$ipaddress
HOSTNAME=$(hostname)
OVN_SB_REMOTE=tcp:$OVN_DB_IP:6642
OVN_NB_REMOTE=tcp:$OVN_DB_IP:6641
# Enable logging to files.
LOGFILE=/opt/stack/log/stack.sh.log
DEVSTACKEOF
devstack/stack.sh

@@ -0,0 +1,9 @@
===================
Sparse architecture
===================
Please reference the files in
/doc/source/contributor/ovn_vagrant/sparse-architecture.rst for more
information about this architecture.

vagrant/ovn/sparse/Vagrantfile
@@ -0,0 +1,96 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'yaml'
require 'ipaddr'
require '../provisioning/providers.rb'
vagrant_config = YAML.load_file("instances.yml")
Vagrant.configure(2) do |config|
if Vagrant.has_plugin?("vagrant-cachier")
# Configure cached packages to be shared between instances of the same base box.
# More info on http://fgrehm.viewdocs.io/vagrant-cachier/usage
config.cache.scope = :box
end
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder File.expand_path(".."), "/vagrant"
config.vm.synced_folder File.expand_path("../.."), "/home/vagrant/neutron"
# Use the ipaddr library to calculate the netmask of a given network
net = IPAddr.new vagrant_config['provider_network']
netmask = net.inspect().split("/")[1].split(">")[0]
# Build the common args for the setup-base.sh scripts.
setup_base_common_args = "#{vagrant_config['ovndb']['ip']} #{vagrant_config['ovndb']['short_name']} " +
"#{vagrant_config['ovncontroller']['ip']} #{vagrant_config['ovncontroller']['short_name']} " +
"#{vagrant_config['ovncompute1']['ip']} #{vagrant_config['ovncompute1']['short_name']} " +
"#{vagrant_config['ovncompute2']['ip']} #{vagrant_config['ovncompute2']['short_name']} " +
"#{vagrant_config['ovnvtep']['ip']} #{vagrant_config['ovnvtep']['short_name']} "
# Bring up the Devstack ovsdb/ovn-northd node
config.vm.define "ovn-db" do |ovndb|
cfg = vagrant_config['ovndb']
ovndb.vm.host_name = cfg['host_name']
ovndb.vm.network "private_network", ip: cfg['ip']
ovndb.vm.network "private_network", ip: cfg['prov-ip'], netmask: netmask
ovndb.vm.provision "shell", path: "../provisioning/setup-base.sh", privileged: false,
:args => "#{vagrant_config['ovndb']['mtu']} #{setup_base_common_args}"
ovndb.vm.provision "shell", path: "../provisioning/setup-db.sh", privileged: false, :args => "#{vagrant_config['ovncontroller']['ip']}"
configure_providers(ovndb.vm, cfg)
end
# Bring up the Devstack controller node on Virtualbox
config.vm.define "ovn-controller", primary: true do |ovncontroller|
cfg = vagrant_config['ovncontroller']
ovncontroller.vm.host_name = cfg['host_name']
ovncontroller.vm.network "private_network", ip: cfg['ip']
ovncontroller.vm.network "private_network", ip: cfg['prov-ip'], netmask: netmask
ovncontroller.vm.provision "shell", path: "../provisioning/setup-base.sh", privileged: false,
:args => "#{cfg['mtu']} #{setup_base_common_args}"
ovncontroller.vm.provision "shell", path: "../provisioning/setup-controller.sh", privileged: false,
:args => "#{vagrant_config['ovndb']['ip']} #{vagrant_config['provider_start_ip']} #{vagrant_config['provider_end_ip']} " +
"#{vagrant_config['provider_gateway']} #{vagrant_config['provider_network']} #{vagrant_config['ovn_vm_subnet']}"
configure_providers(ovncontroller.vm, cfg)
end
config.vm.define "ovn-vtep", autostart: false do |ovnvtep|
cfg = vagrant_config['ovnvtep']
ovnvtep.vm.host_name = cfg['host_name']
ovnvtep.vm.network "private_network", ip: cfg['ip']
ovnvtep.vm.network "private_network", ip: cfg['prov-ip'], netmask: netmask
ovnvtep.vm.provision "shell", path: "../provisioning/setup-base.sh", privileged: false,
:args => "#{cfg['mtu']} #{setup_base_common_args}"
ovnvtep.vm.provision "shell", path: "../provisioning/setup-vtep.sh", privileged: false, :args => "#{vagrant_config['ovncontroller']['ip']} #{vagrant_config['ovndb']['ip']}"
configure_providers(ovnvtep.vm, cfg)
end
# Bring up the first Devstack compute node on Virtualbox
config.vm.define "ovn-compute1" do |ovncompute1|
cfg = vagrant_config['ovncompute1']
ovncompute1.vm.host_name = cfg['host_name']
ovncompute1.vm.network "private_network", ip: cfg['ip']
ovncompute1.vm.network "private_network", ip: cfg['prov-ip'], netmask: netmask
ovncompute1.vm.provision "shell", path: "../provisioning/setup-base.sh", privileged: false,
:args => "#{cfg['mtu']} #{setup_base_common_args}"
ovncompute1.vm.provision "shell", path: "../provisioning/setup-compute.sh", privileged: false,
:args => "#{vagrant_config['ovncontroller']['ip']} #{vagrant_config['ovndb']['ip']}"
configure_providers(ovncompute1.vm, cfg)
end
# Bring up the second Devstack compute node on Virtualbox
config.vm.define "ovn-compute2" do |ovncompute2|
cfg = vagrant_config['ovncompute2']
ovncompute2.vm.host_name = cfg['host_name']
ovncompute2.vm.network "private_network", ip: cfg['ip']
ovncompute2.vm.network "private_network", ip: cfg['prov-ip'], netmask: netmask
ovncompute2.vm.provision "shell", path: "../provisioning/setup-base.sh", privileged: false,
:args => "#{cfg['mtu']} #{setup_base_common_args}"
ovncompute2.vm.provision "shell", path: "../provisioning/setup-compute.sh", privileged: false,
:args => "#{vagrant_config['ovncontroller']['ip']} #{vagrant_config['ovndb']['ip']}"
configure_providers(ovncompute2.vm, cfg)
end
end
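A possible way to drive this Vagrantfile (the machine names, the DISTRO and VAGRANT_OVN_VM_BOX overrides, and the autostart behaviour all come from the files in this change; the working directory is an assumption)::

    cd neutron/vagrant/ovn/sparse   # adjust to wherever the sparse directory lands in your tree
    vagrant up                      # brings up ovn-db, ovn-controller, ovn-compute1 and ovn-compute2
    vagrant up ovn-vtep             # optional; this VM has autostart disabled
    DISTRO=ubuntu VAGRANT_OVN_VM_BOX=bento/ubuntu-18.04 vagrant up   # override the base box if desired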

@@ -0,0 +1,46 @@
---
provider_network: "10.10.0.0/16"
provider_gateway: "10.10.0.1"
provider_start_ip: "10.10.0.101"
provider_end_ip: "10.10.255.250"
ovn_vm_subnet: "192.168.33.0/24"
ovndb:
short_name: "ovn-db"
host_name: "ovn-db.devstack.dev"
ip: "192.168.33.11"
prov-ip: "10.10.0.11"
memory: 512
cpus: 2
mtu: 1500
ovncontroller:
short_name: "ovn-controller"
host_name: "ovn-controller.devstack.dev"
ip: "192.168.33.12"
prov-ip: "10.10.0.12"
memory: 3072
cpus: 2
mtu: 1500
ovnvtep:
short_name: "ovn-vtep"
host_name: "ovn-vtep.devstack.dev"
ip: "192.168.33.13"
prov-ip: "10.10.0.13"
memory: 512
cpus: 1
mtu: 1500
ovncompute1:
short_name: "ovn-compute1"
host_name: "ovn-compute1.devstack.dev"
ip: "192.168.33.31"
prov-ip: "10.10.0.31"
memory: 1536
cpus: 1
mtu: 1500
ovncompute2:
short_name: "ovn-compute2"
host_name: "ovn-compute2.devstack.dev"
ip: "192.168.33.32"
prov-ip: "10.10.0.32"
memory: 1536
cpus: 1
mtu: 1500

@@ -35,6 +35,7 @@
- ^tox.ini$
- ^vagrant/.*$
- ^migration/.*$
- ^devstack/.*\.sample$
- tempest-multinode-full-py3:
voting: false
irrelevant-files: *irrelevant-files