Browse Source

Remove dragonflow

Dragonflow was removed from governance in 2018 and is now being retired.
This cleans up references to dragonflow jobs and configuration.

http://lists.openstack.org/pipermail/openstack-discuss/2020-June/015600.html

This backport drops sample config file and docs changes and only removes
the zuul configuration since conceivably the project could still be used
with older branches.

Change-Id: Ie990da4e68e82d998768fa0c047cca4cccd59915
Signed-off-by: Sean McGinnis <sean.mcginnis@gmail.com>
(cherry picked from commit cded615f86)
(cherry picked from commit 2bf9ca4599)
(cherry picked from commit 4b33113290)
(cherry picked from commit e3eab6e653)
(cherry picked from commit 77aa18e554)
changes/60/750960/2
Sean McGinnis 10 months ago
committed by Michał Dulko
parent
commit
8399b23cba
5 changed files with 1 addition and 294 deletions
  1. +0
    -9
      .zuul.yaml
  2. +1
    -15
      devstack/devstackgaterc
  3. +0
    -220
      devstack/local.conf.df.sample
  4. +0
    -49
      devstack/local.conf.pod-in-vm.undercloud.df.sample
  5. +0
    -1
      playbooks/kuryr-kubernetes-install-base/run.yaml

+ 0
- 9
.zuul.yaml View File

@ -9,7 +9,6 @@
required-projects:
- openstack/devstack-gate
- openstack/devstack-plugin-container
- openstack/dragonflow
- openstack/kuryr
- openstack/kuryr-kubernetes
irrelevant-files:
@ -44,13 +43,6 @@
name: kuryr-kubernetes-install-default
parent: kuryr-kubernetes-install-base
- job:
name: kuryr-kubernetes-install-dragonflow
parent: kuryr-kubernetes-install-base
vars:
variant: dragonflow
voting: false
- job:
name: kuryr-kubernetes-tempest-multinode
parent: legacy-dsvm-base-multinode
@ -147,7 +139,6 @@
check:
jobs:
- kuryr-kubernetes-install-default
- kuryr-kubernetes-install-dragonflow
- kuryr-kubernetes-tempest-lbaasv2
- kuryr-kubernetes-tempest-lbaasv2-openshift
- kuryr-kubernetes-tempest-lbaasv2-daemon


+ 1
- 15
devstack/devstackgaterc View File

@ -24,18 +24,4 @@ export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin devstack-plugin-container http
export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin kuryr-kubernetes https://git.openstack.org/openstack/kuryr-kubernetes"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"NEUTRON_LBAAS_SERVICE_PROVIDERV2=LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"
export OVERRIDE_ENABLED_SERVICES=neutron,q-svc,key,mysql,rabbit,docker,etcd3,kubernetes-api,kubernetes-controller-manager,kubernetes-scheduler,kubelet,kuryr-kubernetes,q-lbaasv2
if [[ $VARIANT == 'default' ]]; then
export OVERRIDE_ENABLED_SERVICES+=,q-agt,q-dhcp,q-l3
elif [[ $VARIANT == 'dragonflow' ]]; then
export OVERRIDE_ENABLED_SERVICES+=,df-redis,df-redis-server,df-controller,df-ext-services,df-zmq-publisher-service,df-l3-agent
export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin dragonflow https://github.com/openstack/dragonflow"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_ENABLE_DRAGONFLOW_LOCAL_CONTROLLER=True"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"DF_RUNNING_IN_GATE=True"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"TUNNEL_TYPE=vxlan"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"DF_L2_RESPONDER=\"True\""
export DEVSTACK_LOCAL_CONFIG+=$'\n'"OVS_INSTALL_FROM_GIT=False"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"OVS_BRANCH=master"
else
die $LINENO "Unsupported variant"
fi
export OVERRIDE_ENABLED_SERVICES+=,q-agt,q-dhcp,q-l3

+ 0
- 220
devstack/local.conf.df.sample View File

@ -1,220 +0,0 @@
[[local|localrc]]
enable_plugin kuryr-kubernetes \
https://git.openstack.org/openstack/kuryr-kubernetes
enable_plugin dragonflow https://git.openstack.org/openstack/dragonflow
# If you do not want stacking to clone new versions of the enabled services,
# like for example when you did local modifications and need to ./unstack.sh
# and ./stack.sh again, uncomment the following
# RECLONE="no"
# Log settings for better readability
LOGFILE=devstack.log
LOG_COLOR=False
# Credentials
ADMIN_PASSWORD=pass
DATABASE_PASSWORD=pass
RABBIT_PASSWORD=pass
SERVICE_PASSWORD=pass
SERVICE_TOKEN=pass
# Enable Keystone v3
IDENTITY_API_VERSION=3
# In pro of speed and being lightweight, we will be explicit in regards to
# which services we enable
ENABLED_SERVICES=""
# DF services
enable_service df-redis
enable_service df-redis-server
enable_service df-controller
# Neutron services
enable_service neutron
enable_service q-svc
# Keystone
enable_service key
# Dependencies
enable_service mysql
enable_service rabbit
# enable DF local controller
Q_ENABLE_DRAGONFLOW_LOCAL_CONTROLLER=True
# DF settings
DF_RUNNING_IN_GATE=True
TUNNEL_TYPE=vxlan
DF_SELECTIVE_TOPO_DIST=False
# OCTAVIA
KURYR_K8S_LBAAS_USE_OCTAVIA=False
# Uncomment it to use L2 communication between loadbalancer and member pods
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
if [[ "$KURYR_K8S_LBAAS_USE_OCTAVIA" == "True" ]]; then
# Octavia LBaaSv2
LIBS_FROM_GIT+=python-octaviaclient
enable_plugin octavia https://git.openstack.org/openstack/octavia
enable_service octavia
enable_service o-api
enable_service o-cw
enable_service o-hm
enable_service o-hk
## Octavia Deps
### Image
### Barbican
enable_plugin barbican https://git.openstack.org/openstack/barbican
### Nova
enable_service n-api
enable_service n-api-meta
enable_service n-cpu
enable_service n-cond
enable_service n-sch
enable_service placement-api
enable_service placement-client
### Glance
enable_service g-api
enable_service g-reg
else
# LBaaSv2 service and Haproxy agent
enable_plugin neutron-lbaas \
https://git.openstack.org/openstack/neutron-lbaas
enable_service q-lbaasv2
fi
# By default use all the services from the kuryr-kubernetes plugin
# Docker
# ======
# If you already have docker configured, running and with its socket writable
# by the stack user, you can omit the following line.
enable_plugin devstack-plugin-container https://git.openstack.org/openstack/devstack-plugin-container
# Etcd
# ====
# The default is for devstack to run etcd for you.
enable_service etcd3
# You can also run the deprecated etcd containerized and select the image and
# version of it by commenting the etcd3 service enablement and uncommenting
#
# enable legacy_etcd
#
# You can also modify the following defaults.
# KURYR_ETCD_IMAGE="quay.io/coreos/etcd"
# KURYR_ETCD_VERSION="v3.0.8"
#
# You can select the listening and advertising client and peering Etcd
# addresses by uncommenting and changing from the following defaults:
# KURYR_ETCD_ADVERTISE_CLIENT_URL=http://my_host_ip:2379}
# KURYR_ETCD_ADVERTISE_PEER_URL=http://my_host_ip:2380}
# KURYR_ETCD_LISTEN_CLIENT_URL=http://0.0.0.0:2379}
# KURYR_ETCD_LISTEN_PEER_URL=http://0.0.0.0:2380}
#
# If you already have an etcd cluster configured and running, you can just
# comment out the lines enabling legacy_etcd and etcd3
# then uncomment and set the following line:
# KURYR_ETCD_CLIENT_URL="http://etcd_ip:etcd_client_port"
# Kubernetes
# ==========
#
# Kubernetes is run from the hyperkube docker image
# If you already have a Kubernetes deployment, you can use it instead and omit
# enabling the Kubernetes service (except Kubelet, which must be run by
# devstack so that it uses our development CNI driver.
#
# The default is, again, for devstack to run the Kubernetes services:
enable_service kubernetes-api
enable_service kubernetes-controller-manager
enable_service kubernetes-scheduler
# We use hyperkube to run the services. You can select the hyperkube image and/
# or version by uncommenting and setting the following ENV vars different
# to the following defaults:
# KURYR_HYPERKUBE_IMAGE="gcr.io/google_containers/hyperkube-amd64"
# KURYR_HYPERKUBE_VERSION="v1.6.2"
#
# If you have the 8080 port already bound to another service, you will need to
# have kubernetes API server bind to another port. In order to do that,
# uncomment and set a different port number in:
# KURYR_K8S_API_PORT="8080"
#
# If you want to test with a different range for the Cluster IPs uncomment and
# set the following ENV var to a different CIDR
# KURYR_K8S_CLUSTER_IP_RANGE="10.0.0.0/24"
#
# If, however, you are reusing an existing deployment, you should uncomment and
# set an ENV var so that the Kubelet devstack runs can find the API server:
# KURYR_K8S_API_URL="http://k8s_api_ip:k8s_api_port"
#
# Kubelet
# =======
#
# Kubelet should almost invariably be run by devstack
enable_service kubelet
# You can specify a different location for the hyperkube binary that will be
# extracted from the hyperkube container into the Host filesystem:
# KURYR_HYPERKUBE_BINARY=/usr/local/bin/hyperkube
#
# NOTE: KURYR_HYPERKUBE_IMAGE, KURYR_HYPERKUBE_VERSION also affect which
# the selected binary for the Kubelet.
# Kuryr watcher
# =============
#
# Just like the Kubelet, you'll want to have the watcher enabled. It is the
# part of the codebase that connects to the Kubernetes API server to read the
# resource events and convert them to Neutron actions
enable_service kuryr-kubernetes
# Kuryr POD VIF Driver
# ====================
#
# Set up the VIF Driver to be used. The default one is the neutron-vif, but if
# a nested deployment is desired, the corresponding driver need to be set,
# e.g.: nested-vlan or nested-macvlan
# KURYR_POD_VIF_DRIVER=neutron-vif
# Kuryr Ports Pools
# =================
#
# To speed up containers boot time the kuryr ports pool driver can be enabled
# by uncommenting the next line, so that neutron port resources are precreated
# and ready to be used by the pods when needed
# KURYR_USE_PORTS_POOLS=True
#
# By default the pool driver is noop, i.e., there is no pool. If pool
# optimizations want to be used you need to set it to 'neutron' for the
# baremetal case, or to 'nested' for the nested case
# KURYR_VIF_POOL_DRIVER=noop
#
# There are extra configuration options for the pools that can be set to decide
# on the minimum number of ports that should be ready to use at each pool, the
# maximum (0 to unset), and the batch size for the repopulation actions, i.e.,
# the number of neutron ports to create in bulk operations. Finally, the update
# frequency between actions over the pool can be set too
# KURYR_VIF_POOL_MIN=5
# KURYR_VIF_POOL_MAX=0
# KURYR_VIF_POOL_BATCH=10
# KURYR_VIF_POOL_UPDATE_FREQ=20
# Increase Octavia amphorae timeout so that the first LB amphora has time to
# build and boot
if [[ "$KURYR_K8S_LBAAS_USE_OCTAVIA" == "True" ]]; then
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
else
NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"
fi
[[post-config|$OCTAVIA_CONF]]
[controller_worker]
amp_active_retries=9999

+ 0
- 49
devstack/local.conf.pod-in-vm.undercloud.df.sample View File

@ -1,49 +0,0 @@
[[local|localrc]]
Q_ENABLE_DRAGONFLOW_LOCAL_CONTROLLER=True
DATABASE_PASSWORD=pass
RABBIT_PASSWORD=pass
SERVICE_PASSWORD=pass
SERVICE_TOKEN=pass
ADMIN_PASSWORD=pass
MULTI_HOST=1
# Dragonflow plugin and services
enable_plugin dragonflow https://git.openstack.org/openstack/dragonflow
enable_service df-controller
enable_service df-redis
enable_service df-redis-server
enable_service df-metadata
enable_service q-trunk
# Neutron services
disable_service n-net
enable_service q-svc
enable_service q-qos
disable_service q-l3
disable_service df-l3-agent
# We have to disable the neutron L2 agent. DF does not use the L2 agent.
disable_service q-agt
# We have to disable the neutron dhcp agent. DF does not use the dhcp agent.
disable_service q-dhcp
# LBaaSv2 service and Haproxy agent
enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas
enable_service q-lbaasv2
NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"
# Enable heat services if you want to deploy overcloud using Heat stack
enable_plugin heat https://git.openstack.org/openstack/heat
enable_service h-eng h-api h-api-cfn h-api-cw
disable_service tempest
DF_REDIS_PUBSUB=True
Q_USE_PROVIDERNET_FOR_PUBLIC=True
Q_FLOATING_ALLOCATION_POOL=start=172.24.4.10,end=172.24.4.200
PUBLIC_NETWORK_NAME=public
PUBLIC_NETWORK_GATEWAY=172.24.4.1

+ 0
- 1
playbooks/kuryr-kubernetes-install-base/run.yaml View File

@ -41,7 +41,6 @@
export KEEP_LOCALRC=1
export PROJECTS="openstack/kuryr-kubernetes $PROJECTS"
export PROJECTS="openstack/dragonflow $PROJECTS"
export PROJECTS="openstack/devstack-plugin-container $PROJECTS"
function gate_hook {


Loading…
Cancel
Save