Implement the sandbox proposal - Part 2
This commit extends the existing Docker driver to leverage Nova to provision sandboxes. In other words, there are two drivers: 1. docker.driver.DockerDriver (default) 2. docker.driver.NovaDockerDriver Users can choose which driver they want to use. The major difference is that the second driver integrates with Neutron; Neutron is used to provide networking for containers. The files under nova/* were copied from nova-docker with minor modifications (mainly for fixing bugs and removing unused code). In particular, nova/virt/docker/driver.py contains a DockerDriver for Nova. It implements the Nova virt driver interface using docker. This custom virt driver is used to create/delete/manage sandbox containers. At this commit, we hardcoded the flavor to m1.small and nics to 'auto' when creating sandbox instances. We might make them parameters specified by end-users. The flavor will decide the resource constraints of the container, and nics will decide how the networking of the container is configured. The docker image kubernetes/pause is chosen to be the image of the sandbox container, since its size is small and it satisfies what we want (an empty container that keeps running). When creating the sandbox, we haven't specified a security group yet, so the default security group is used. Users need to open ports in that security group to access the container from outside. Later, we could create a custom security group for each container, and automatically open ports that are exposed by the container. For more details of the design, please refer to: https://review.openstack.org/#/c/365754/ Implements: blueprint neutron-integration Depends-On: Ib8f193ea1edf1f148e9ba505205495170ebf6d67 Change-Id: I1543f386b6439d305b308d6c6ebe073225223c25
This commit is contained in:
parent
b7698ffe3b
commit
3438316954
@ -18,14 +18,4 @@
|
||||
# Keep all devstack settings here instead of project-config for easy
|
||||
# maintain if we want to change devstack config settings in future.
|
||||
|
||||
# Notes(eliqiao): Overwrite defaut ENABLED_SERVICES since currently zun
|
||||
# doesn't relay on any other OpenStack service yet.
|
||||
|
||||
# Fixme(eliqiao): We don't need nova service, but devstack won't create
|
||||
# userrc for us if nova service is not enabled, check
|
||||
# https://github.com/openstack-dev/devstack/blob/master/stack.sh#L1310
|
||||
|
||||
OVERRIDE_ENABLED_SERVICES="dstat,key,mysql,rabbit,n-api,n-cond,n-cpu,n-crt,n-obj,n-sch,g-api,g-reg,tempest"
|
||||
export OVERRIDE_ENABLED_SERVICES
|
||||
|
||||
$BASE/new/devstack-gate/devstack-vm-gate.sh
|
||||
|
60
devstack/lib/nova
Normal file
60
devstack/lib/nova
Normal file
@ -0,0 +1,60 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# lib/nova
|
||||
# Configure the docker hypervisor
|
||||
|
||||
# Dependencies:
|
||||
#
|
||||
# - ``functions`` file
|
||||
# - ``DEST``, ``NOVA_CONF``, ``STACK_USER`` must be defined
|
||||
|
||||
# ``stack.sh`` calls the entry points in this order:
|
||||
#
|
||||
# - configure_nova_docker
|
||||
|
||||
# Save trace setting
|
||||
# Save trace setting so callers can restore it when sourcing is done.
XTRACE=$(set +o | grep xtrace)
set +o xtrace

# Defaults
# --------
NOVA_CONF_DIR=${NOVA_CONF_DIR:-/etc/nova}
# Bug fix: the default value must expand $NOVA_CONF_DIR; the original
# literal "NOVA_CONF_DIR/nova.conf" (missing "$") resolved to a bogus
# relative path instead of /etc/nova/nova.conf.
NOVA_CONF=${NOVA_CONF:-$NOVA_CONF_DIR/nova.conf}
|
||||
|
||||
|
||||
# Entry Points
|
||||
# ------------
|
||||
|
||||
# configure_nova_docker - Set config files, create data dirs, etc
# Points nova-compute at the docker virt driver, restarts the docker
# daemon with debug logging enabled (covering the three config layouts
# found across distros), and installs the rootwrap filters that the
# docker driver needs for network-namespace setup.
function configure_nova_docker {
    iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver

    # CentOS/RedHat distros don't start the services just after the package
    # is installed if it is not explicitly set. So the script fails on
    # them in this killall because there is nothing to kill.
    sudo killall docker || true

    # Enable debug level logging. Each branch prints the file before and
    # after editing so the change shows up in the devstack logs.
    if [ -f "/etc/default/docker" ]; then
        sudo cat /etc/default/docker
        sudo sed -i 's/^.*DOCKER_OPTS=.*$/DOCKER_OPTS=\"--debug --storage-opt dm.override_udev_sync_check=true\"/' /etc/default/docker
        sudo cat /etc/default/docker
    fi
    if [ -f "/etc/sysconfig/docker" ]; then
        sudo cat /etc/sysconfig/docker
        sudo sed -i 's/^.*OPTIONS=.*$/OPTIONS=--debug --selinux-enabled/' /etc/sysconfig/docker
        sudo cat /etc/sysconfig/docker
    fi
    if [ -f "/usr/lib/systemd/system/docker.service" ]; then
        sudo cat /usr/lib/systemd/system/docker.service
        sudo sed -i 's/docker daemon/docker daemon --debug/' /usr/lib/systemd/system/docker.service
        sudo cat /usr/lib/systemd/system/docker.service
        # systemd must re-read the edited unit file before restart.
        sudo systemctl daemon-reload
    fi

    sudo service docker start || true

    # setup rootwrap filters so the driver can run 'ln' as root when
    # wiring container network namespaces (see etc/nova/rootwrap.d).
    local rootwrap_conf_src_dir="$DEST/zun/etc/nova"
    sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.d/*.filters /etc/nova/rootwrap.d
}
|
@ -65,7 +65,7 @@ else
|
||||
fi
|
||||
|
||||
DOCKER_GROUP=${DOCKER_GROUP:-docker}
|
||||
ZUN_DRIVER=${DEFAULT_ZUN_DRIVER:-docker}
|
||||
ZUN_DRIVER=${ZUN_DRIVER:-docker}
|
||||
|
||||
ETCD_VERSION=v3.0.13
|
||||
if is_ubuntu; then
|
||||
@ -83,16 +83,45 @@ function check_docker {
|
||||
fi
|
||||
}
|
||||
|
||||
# install_docker - Install the docker engine (if missing) and prepare it
# for use by the current user.
function install_docker {
    # Only pull the convenience installer when no working docker CLI is
    # already present (check_docker succeeds when docker responds).
    check_docker || curl -fsSL https://get.docker.com/ | sudo sh

    # The rendered span carried both the stale pre-change lines
    # (STACK_USER / undefined lowercase ${docker_group}) and their
    # replacements; keep only the corrected pair so the running user can
    # talk to the docker socket without sudo.
    echo "Adding $(whoami) to ${DOCKER_GROUP}..."
    add_user_to_group $(whoami) $DOCKER_GROUP

    # socat and dnsmasq are runtime dependencies of docker networking on
    # Fedora-family distros.
    if is_fedora; then
        install_package socat dnsmasq
    fi

    # Ubuntu <= 14.x uses upstart; everything newer is systemd.
    if is_ubuntu && [ $UBUNTU_RELEASE_BASE_NUM -le 14 ]; then
        sudo service docker start || true
    else
        sudo systemctl enable docker.service
        sudo systemctl start docker || true
    fi
}
|
||||
|
||||
# is_zun_enabled - Test if any zun services are enabled.
# Returns 0 (success) when at least one "zun-*" service appears in the
# comma-separated ENABLED_SERVICES list, 1 otherwise.
function is_zun_enabled {
    if [[ ,${ENABLED_SERVICES} =~ ,"zun-" ]]; then
        return 0
    else
        return 1
    fi
}
|
||||
|
||||
# cleanup_zun() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_zun {
    # State and auth-cache directories are recreated on the next stack.
    sudo rm -rf $ZUN_STATE_PATH $ZUN_AUTH_CACHE_DIR

    # Destroy old containers
    local container_name_prefix=${CONTAINER_NAME_PREFIX:-zun-}
    local containers
    # List every container (running or stopped) whose name carries the
    # prefix; the sed expression extracts the bare container name from
    # each "docker ps -a" row.
    containers=`sudo docker ps -a | grep $container_name_prefix | sed "s/.*\($container_name_prefix[0-9a-zA-Z-]*\).*/\1/g"`
    if [ ! "$containers" = "" ]; then
        # Force-remove; tolerate races where a container vanished already.
        sudo docker rm -f $containers || true
    fi
}
|
||||
|
||||
# configure_zun() - Set config files, create data dirs, etc
|
||||
@ -108,15 +137,11 @@ function configure_zun {
|
||||
create_zun_conf
|
||||
|
||||
create_api_paste_conf
|
||||
|
||||
if [[ ${ZUN_DRIVER} == "docker" ]]; then
|
||||
check_docker || install_docker
|
||||
fi
|
||||
}
|
||||
|
||||
# upload_sandbox_image() - Upload sandbox image to glance
|
||||
function upload_sandbox_image {
|
||||
if [[ ${ZUN_DRIVER} == "docker" ]]; then
|
||||
if [[ ${ZUN_DRIVER} == "docker" || ${ZUN_DRIVER} == "nova-docker" ]]; then
|
||||
sg docker "docker pull kubernetes/pause"
|
||||
sg docker "docker save kubernetes/pause" | openstack image create kubernetes/pause --public --container-format docker --disk-format raw
|
||||
fi
|
||||
@ -149,6 +174,11 @@ function create_zun_conf {
|
||||
|
||||
# (Re)create ``zun.conf``
|
||||
rm -f $ZUN_CONF
|
||||
if [[ ${ZUN_DRIVER} == "docker" ]]; then
|
||||
iniset $ZUN_CONF DEFAULT container_driver docker.driver.DockerDriver
|
||||
elif [[ ${ZUN_DRIVER} == "nova-docker" ]]; then
|
||||
iniset $ZUN_CONF DEFAULT container_driver docker.driver.NovaDockerDriver
|
||||
fi
|
||||
iniset $ZUN_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
|
||||
iniset $ZUN_CONF oslo_messaging_rabbit rabbit_userid $RABBIT_USERID
|
||||
iniset $ZUN_CONF oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
|
||||
@ -182,6 +212,8 @@ function create_zun_conf {
|
||||
${KEYSTONE_SERVICE_PROTOCOL}://${HOST_IP}:${KEYSTONE_SERVICE_PORT}/v3
|
||||
iniset $ZUN_CONF keystone_authtoken auth_version v3
|
||||
|
||||
iniset $ZUN_CONF glance images_directory $ZUN_STATE_PATH/images
|
||||
|
||||
if is_fedora || is_suse; then
|
||||
# zun defaults to /usr/local/bin, but fedora and suse pip like to
|
||||
# install things in /usr/bin
|
||||
@ -263,26 +295,11 @@ function install_zun {
|
||||
setup_develop $ZUN_DIR
|
||||
}
|
||||
|
||||
function install_docker {
|
||||
echo "Installing docker"
|
||||
curl -fsSL https://get.docker.com/ | sudo sh
|
||||
echo "Adding $(whoami) to ${DOCKER_GROUP}..."
|
||||
sudo usermod -a -G ${DOCKER_GROUP} $(whoami)
|
||||
newgrp ${DOCKER_GROUP}
|
||||
if is_ubuntu && [ $UBUNTU_RELEASE_BASE_NUM -le 14 ]; then
|
||||
sudo service docker start || true
|
||||
else
|
||||
sudo systemctl enable docker.service
|
||||
sudo systemctl start docker || true
|
||||
fi
|
||||
}
|
||||
|
||||
function install_etcd_server {
|
||||
echo "Installing etcd"
|
||||
check_docker || install_docker
|
||||
# If there's a container named 'etcd' already exists, remove it.
|
||||
if [ $(sudo docker ps -a | awk '{print $NF}' | grep -w etcd) ]; then
|
||||
sudo docker rm -f etcd
|
||||
sudo docker rm -f etcd || true
|
||||
fi
|
||||
sudo docker run -d --net=host --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \
|
||||
/usr/local/bin/etcd \
|
||||
@ -322,7 +339,7 @@ function start_zun_api {
|
||||
# start_zun_compute() - Start Zun compute agent
|
||||
function start_zun_compute {
|
||||
echo "Start zun compute..."
|
||||
if [[ ${ZUN_DRIVER} == "docker" ]]; then
|
||||
if [[ ${ZUN_DRIVER} == "docker" || ${ZUN_DRIVER} == "nova-docker" ]]; then
|
||||
run_process zun-compute "$ZUN_BIN_DIR/zun-compute" ${DOCKER_GROUP}
|
||||
else
|
||||
run_process zun-compute "$ZUN_BIN_DIR/zun-compute"
|
||||
@ -337,6 +354,7 @@ function start_zun_etcd {
|
||||
function stop_zun-etcd {
|
||||
echo "Stop zun etcd..."
|
||||
sudo docker stop etcd
|
||||
sudo docker rm -f etcd || true
|
||||
}
|
||||
|
||||
# start_zun() - Start running processes, including screen
|
||||
|
7
devstack/override-defaults
Normal file
7
devstack/override-defaults
Normal file
@ -0,0 +1,7 @@
|
||||
# Plug-in overrides
#
# Default the Zun container driver and, when the nova-docker driver is
# selected, tell devstack to configure nova with the docker hypervisor.

ZUN_DRIVER=${ZUN_DRIVER:-docker}

case "${ZUN_DRIVER}" in
    nova-docker)
        export VIRT_DRIVER=docker
        ;;
esac
|
@ -6,11 +6,13 @@ set -o xtrace
|
||||
|
||||
echo_summary "zun's plugin.sh was called..."
|
||||
source $DEST/zun/devstack/lib/zun
|
||||
source $DEST/zun/devstack/lib/nova
|
||||
(set -o posix; set)
|
||||
|
||||
if is_service_enabled zun-api zun-compute; then
|
||||
if [[ "$1" == "stack" && "$2" == "install" ]]; then
|
||||
echo_summary "Installing zun"
|
||||
install_docker
|
||||
install_zun
|
||||
|
||||
LIBS_FROM_GIT="${LIBS_FROM_GIT},python-zunclient"
|
||||
@ -25,6 +27,10 @@ if is_service_enabled zun-api zun-compute; then
|
||||
create_zun_accounts
|
||||
fi
|
||||
|
||||
if [[ ${ZUN_DRIVER} == "nova-docker" ]]; then
|
||||
configure_nova_docker
|
||||
fi
|
||||
|
||||
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
|
||||
# Initialize zun
|
||||
init_zun
|
||||
|
6
etc/nova/rootwrap.d/docker.filters
Normal file
6
etc/nova/rootwrap.d/docker.filters
Normal file
@ -0,0 +1,6 @@
|
||||
# nova-rootwrap command filters for setting up network in the docker driver
|
||||
# This file should be owned by (and only-writeable by) the root user
|
||||
|
||||
[Filters]
|
||||
# nova/virt/docker/driver.py: 'ln', '-sf', '/var/run/netns/.*'
|
||||
ln: CommandFilter, /bin/ln, root
|
0
nova/__init__.py
Normal file
0
nova/__init__.py
Normal file
16
nova/virt/__init__.py
Normal file
16
nova/virt/__init__.py
Normal file
@ -0,0 +1,16 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Allow composition of nova.virt namespace from disparate packages.
|
||||
__import__('pkg_resources').declare_namespace(__name__)
|
22
nova/virt/docker/__init__.py
Normal file
22
nova/virt/docker/__init__.py
Normal file
@ -0,0 +1,22 @@
|
||||
# Copyright (c) 2013 dotCloud, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
:mod:`docker` -- Nova support for Docker Hypervisor to run Linux containers
|
||||
===========================================================================
|
||||
"""
|
||||
from nova.virt.docker import driver
|
||||
|
||||
DockerDriver = driver.DockerDriver
|
99
nova/virt/docker/client.py
Normal file
99
nova/virt/docker/client.py
Normal file
@ -0,0 +1,99 @@
|
||||
# Copyright (c) 2013 dotCloud, Inc.
|
||||
# Copyright 2014 IBM Corp.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import functools
|
||||
import inspect
|
||||
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
|
||||
from docker import client
|
||||
from docker import tls
|
||||
|
||||
CONF = cfg.CONF
|
||||
DEFAULT_TIMEOUT_SECONDS = 120
|
||||
DEFAULT_DOCKER_API_VERSION = '1.19'
|
||||
|
||||
|
||||
def filter_data(f):
    """Decorator that post-processes data returned by Docker.

    This will avoid any surprises with different versions of Docker.
    """
    @functools.wraps(f, assigned=[])
    def wrapper(*args, **kwargs):
        def _normalize(item):
            # Lists are rebuilt element by element so that nested
            # structures get normalized recursively.
            if isinstance(item, list):
                item = [_normalize(element) for element in item]
            # Dicts are mutated in place: every string key is mirrored
            # under its lower-cased spelling with a normalized value.
            if isinstance(item, dict):
                for key, value in item.items():
                    if isinstance(key, six.string_types):
                        item[key.lower()] = _normalize(value)
            return item

        return _normalize(f(*args, **kwargs))
    return wrapper
|
||||
|
||||
|
||||
class DockerHTTPClient(client.Client):
    """docker-py client configured from the ``[docker]`` conf section.

    Adds TLS wiring from nova.conf plus a few convenience calls the
    driver needs that docker-py does not expose directly.
    """

    def __init__(self, url='unix://var/run/docker.sock'):
        # Only hand docker-py a client certificate pair when at least
        # one of the two files was configured.
        if (CONF.docker.cert_file or
                CONF.docker.key_file):
            client_cert = (CONF.docker.cert_file, CONF.docker.key_file)
        else:
            client_cert = None
        # TLS is switched on whenever any TLS-related option is set.
        if (CONF.docker.ca_file or
                CONF.docker.api_insecure or
                client_cert):
            ssl_config = tls.TLSConfig(
                client_cert=client_cert,
                ca_cert=CONF.docker.ca_file,
                # NOTE(review): api_insecure is passed straight through
                # as ``verify``, which looks inverted given the option
                # help text ("If set, ignore any SSL validation issues").
                # Confirm whether this should be
                # ``not CONF.docker.api_insecure``.
                verify=CONF.docker.api_insecure)
        else:
            ssl_config = False
        super(DockerHTTPClient, self).__init__(
            base_url=url,
            version=DEFAULT_DOCKER_API_VERSION,
            timeout=DEFAULT_TIMEOUT_SECONDS,
            tls=ssl_config
        )
        self._setup_decorators()

    def _setup_decorators(self):
        # Wrap every public docker-py method with filter_data so the
        # dicts it returns get lower-cased string keys.
        for name, member in inspect.getmembers(self, inspect.ismethod):
            if not name.startswith('_'):
                setattr(self, name, filter_data(member))

    def pause(self, container_id):
        # POST /containers/<id>/pause; the API answers 204 on success.
        url = self._url("/containers/{0}/pause".format(container_id))
        res = self._post(url)
        return res.status_code == 204

    def unpause(self, container_id):
        # POST /containers/<id>/unpause; the API answers 204 on success.
        url = self._url("/containers/{0}/unpause".format(container_id))
        res = self._post(url)
        return res.status_code == 204

    def load_repository_file(self, name, path):
        # Stream a saved image tarball at *path* into the docker daemon.
        with open(path, 'rb') as fh:
            self.load_image(fh)

    def get_container_logs(self, container_id):
        # attach(stdout=1, stderr=1, stream=0, logs=1): return the
        # accumulated container output rather than a live stream.
        return self.attach(container_id, 1, 1, 0, 1)
|
754
nova/virt/docker/driver.py
Normal file
754
nova/virt/docker/driver.py
Normal file
@ -0,0 +1,754 @@
|
||||
# Copyright (c) 2013 dotCloud, Inc.
|
||||
# Copyright 2014 IBM Corp.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
A Docker Hypervisor which allows running Linux Containers instead of VMs.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import socket
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from docker import errors
|
||||
import eventlet
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_utils import fileutils
|
||||
from oslo_utils import importutils
|
||||
from oslo_utils import units
|
||||
from oslo_utils import versionutils
|
||||
|
||||
from nova.compute import arch
|
||||
from nova.compute import flavors
|
||||
from nova.compute import hv_type
|
||||
from nova.compute import power_state
|
||||
from nova.compute import vm_mode
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.i18n import _LI
|
||||
from nova.i18n import _LW
|
||||
from nova import objects
|
||||
from nova import utils
|
||||
from nova.virt.docker import client as docker_client
|
||||
from nova.virt.docker import hostinfo
|
||||
from nova.virt.docker import network
|
||||
from nova.virt import driver
|
||||
from nova.virt import firewall
|
||||
from nova.virt import hardware
|
||||
from nova.virt import hostutils
|
||||
from nova.virt import images
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('my_ip', 'nova.conf.netconf')
|
||||
CONF.import_opt('instances_path', 'nova.compute.manager')
|
||||
|
||||
docker_opts = [
|
||||
cfg.StrOpt('root_directory',
|
||||
default='/var/lib/docker',
|
||||
help='Path to use as the root of the Docker runtime.'),
|
||||
cfg.StrOpt('host_url',
|
||||
default='unix:///var/run/docker.sock',
|
||||
help='tcp://host:port to bind/connect to or '
|
||||
'unix://path/to/socket to use'),
|
||||
cfg.BoolOpt('api_insecure',
|
||||
default=False,
|
||||
help='If set, ignore any SSL validation issues'),
|
||||
cfg.StrOpt('ca_file',
|
||||
help='Location of CA certificates file for '
|
||||
'securing docker api requests (tlscacert).'),
|
||||
cfg.StrOpt('cert_file',
|
||||
help='Location of TLS certificate file for '
|
||||
'securing docker api requests (tlscert).'),
|
||||
cfg.StrOpt('key_file',
|
||||
help='Location of TLS private key file for '
|
||||
'securing docker api requests (tlskey).'),
|
||||
cfg.StrOpt('vif_driver',
|
||||
default='nova.virt.docker.vifs.DockerGenericVIFDriver'),
|
||||
cfg.StrOpt('snapshots_directory',
|
||||
default='$instances_path/snapshots',
|
||||
help='Location where docker driver will temporarily store '
|
||||
'snapshots.'),
|
||||
cfg.StrOpt('shared_directory',
|
||||
default=None,
|
||||
help='Shared directory where glance images located. If '
|
||||
'specified, docker will try to load the image from '
|
||||
'the shared directory by image ID.'),
|
||||
cfg.BoolOpt('privileged',
|
||||
default=False,
|
||||
help='Set true can own all root privileges in a container.'),
|
||||
cfg.ListOpt('default_nameservers',
|
||||
default=['8.8.8.8', '8.8.4.4'],
|
||||
help='The default DNS server to use.'),
|
||||
]
|
||||
|
||||
CONF.register_opts(docker_opts, 'docker')
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class DockerDriver(driver.ComputeDriver):
|
||||
"""Docker hypervisor driver."""
|
||||
|
||||
    def __init__(self, virtapi):
        """Initialize the docker virt driver.

        :param virtapi: nova VirtAPI instance supplied by the compute
            manager.
        """
        super(DockerDriver, self).__init__(virtapi)
        # The docker HTTP client is built lazily on first access of the
        # ``docker`` property.
        self._docker = None
        # The VIF driver is configurable ([docker] vif_driver) so the
        # networking backend can be swapped out.
        vif_class = importutils.import_class(CONF.docker.vif_driver)
        self.vif_driver = vif_class()
        self.firewall_driver = firewall.load_driver(
            default='nova.virt.firewall.NoopFirewallDriver')
        # NOTE(zhangguoqing): For passing the nova unit tests
        self.active_migrations = {}
|
||||
|
||||
    @property
    def docker(self):
        # Build the HTTP client lazily so importing/instantiating the
        # driver does not require a reachable docker daemon.
        if self._docker is None:
            self._docker = docker_client.DockerHTTPClient(CONF.docker.host_url)
        return self._docker
|
||||
|
||||
def init_host(self, host):
|
||||
if self._is_daemon_running() is False:
|
||||
raise exception.NovaException(
|
||||
_('Docker daemon is not running or is not reachable'
|
||||
' (check the rights on /var/run/docker.sock)'))
|
||||
|
||||
def _is_daemon_running(self):
|
||||
return self.docker.ping()
|
||||
|
||||
def _start_firewall(self, instance, network_info):
|
||||
self.firewall_driver.setup_basic_filtering(instance, network_info)
|
||||
self.firewall_driver.prepare_instance_filter(instance, network_info)
|
||||
self.firewall_driver.apply_instance_filter(instance, network_info)
|
||||
|
||||
def _stop_firewall(self, instance, network_info):
|
||||
self.firewall_driver.unfilter_instance(instance, network_info)
|
||||
|
||||
def refresh_security_group_rules(self, security_group_id):
|
||||
"""Refresh security group rules from data store.
|
||||
|
||||
Invoked when security group rules are updated.
|
||||
|
||||
:param security_group_id: The security group id.
|
||||
|
||||
"""
|
||||
self.firewall_driver.refresh_security_group_rules(security_group_id)
|
||||
|
||||
def refresh_security_group_members(self, security_group_id):
|
||||
"""Refresh security group members from data store.
|
||||
|
||||
Invoked when instances are added/removed to a security group.
|
||||
|
||||
:param security_group_id: The security group id.
|
||||
|
||||
"""
|
||||
self.firewall_driver.refresh_security_group_members(security_group_id)
|
||||
|
||||
def refresh_provider_fw_rules(self):
|
||||
"""Triggers a firewall update based on database changes."""
|
||||
self.firewall_driver.refresh_provider_fw_rules()
|
||||
|
||||
def refresh_instance_security_rules(self, instance):
|
||||
"""Refresh security group rules from data store.
|
||||
|
||||
Gets called when an instance gets added to or removed from
|
||||
the security group the instance is a member of or if the
|
||||
group gains or loses a rule.
|
||||
|
||||
:param instance: The instance object.
|
||||
|
||||
"""
|
||||
self.firewall_driver.refresh_instance_security_rules(instance)
|
||||
|
||||
def ensure_filtering_rules_for_instance(self, instance, network_info):
|
||||
"""Set up filtering rules.
|
||||
|
||||
:param instance: The instance object.
|
||||
:param network_info: Instance network information.
|
||||
|
||||
"""
|
||||
self.firewall_driver.setup_basic_filtering(instance, network_info)
|
||||
self.firewall_driver.prepare_instance_filter(instance, network_info)
|
||||
|
||||
def unfilter_instance(self, instance, network_info):
|
||||
"""Stop filtering instance.
|
||||
|
||||
:param instance: The instance object.
|
||||
:param network_info: Instance network information.
|
||||
|
||||
"""
|
||||
self.firewall_driver.unfilter_instance(instance, network_info)
|
||||
|
||||
def list_instances(self, inspect=False):
|
||||
res = []
|
||||
for container in self.docker.containers(all=True):
|
||||
info = self.docker.inspect_container(container['id'])
|
||||
if not info:
|
||||
continue
|
||||
if inspect:
|
||||
res.append(info)
|
||||
else:
|
||||
res.append(info['Config'].get('Hostname'))
|
||||
return res
|
||||
|
||||
def attach_interface(self, instance, image_meta, vif):
|
||||
"""Attach an interface to the container."""
|
||||
self.vif_driver.plug(instance, vif)
|
||||
container_id = self._find_container_by_instance(instance).get('id')
|
||||
self.vif_driver.attach(instance, vif, container_id)
|
||||
|
||||
def detach_interface(self, instance, vif):
|
||||
"""Detach an interface from the container."""
|
||||
self.vif_driver.unplug(instance, vif)
|
||||
|
||||
def plug_vifs(self, instance, network_info):
|
||||
"""Plug VIFs into networks."""
|
||||
for vif in network_info:
|
||||
self.vif_driver.plug(instance, vif)
|
||||
self._start_firewall(instance, network_info)
|
||||
|
||||
def _attach_vifs(self, instance, network_info):
|
||||
"""Plug VIFs into container."""
|
||||
if not network_info:
|
||||
return
|
||||
|
||||
if os.name == 'nt':
|
||||
return
|
||||
|
||||
container_id = self._get_container_id(instance)
|
||||
if not container_id:
|
||||
raise exception.InstanceNotFound(instance_id=instance['name'])
|
||||
netns_path = '/var/run/netns'
|
||||
if not os.path.exists(netns_path):
|
||||
utils.execute(
|
||||
'mkdir', '-p', netns_path, run_as_root=True)
|
||||
nspid = self._find_container_pid(container_id)
|
||||
if not nspid:
|
||||
msg = _('Cannot find any PID under container "{0}"')
|
||||
raise RuntimeError(msg.format(container_id))
|
||||
netns_path = os.path.join(netns_path, container_id)
|
||||
utils.execute(
|
||||
'ln', '-sf', '/proc/{0}/ns/net'.format(nspid),
|
||||
'/var/run/netns/{0}'.format(container_id),
|
||||
run_as_root=True)
|
||||
utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link',
|
||||
'set', 'lo', 'up', run_as_root=True)
|
||||
|
||||
for vif in network_info:
|
||||
self.vif_driver.attach(instance, vif, container_id)
|
||||
|
||||
def unplug_vifs(self, instance, network_info):
|
||||
"""Unplug VIFs from networks."""
|
||||
for vif in network_info:
|
||||
self.vif_driver.unplug(instance, vif)
|
||||
self._stop_firewall(instance, network_info)
|
||||
|
||||
    def _encode_utf8(self, value):
        # Coerce *value* to text and encode it as UTF-8 bytes.
        # NOTE(review): ``unicode`` only exists on Python 2 -- on
        # Python 3 this raises NameError. six.text_type would be the
        # portable spelling; confirm the supported interpreter range.
        return unicode(value).encode('utf-8')
|
||||
|
||||
def _find_container_by_instance(self, instance):
|
||||
try:
|
||||
name = self._get_container_name(instance)
|
||||
containers = self.docker.containers(all=True,
|
||||
filters={'name': name})
|
||||
if containers:
|
||||
# NOTE(dims): We expect only one item in the containers list
|
||||
return self.docker.inspect_container(containers[0]['id'])
|
||||
except errors.APIError as e:
|
||||
if e.response.status_code != 404:
|
||||
raise
|
||||
return {}
|
||||
|
||||
def _get_container_name(self, instance):
|
||||
return "zun-sandbox-" + instance['uuid']
|
||||
|
||||
def _get_container_id(self, instance):
|
||||
return self._find_container_by_instance(instance).get('id')
|
||||
|
||||
    def get_info(self, instance):
        """Return a hardware.InstanceInfo describing the container.

        :param instance: nova instance (dict-like; 'name' is read).
        :raises exception.InstanceNotFound: when no container matches.
        """
        container = self._find_container_by_instance(instance)
        if not container:
            raise exception.InstanceNotFound(instance_id=instance['name'])
        running = container['State'].get('Running')
        # NOTE(review): docker reports 'Memory' in bytes but the value
        # is fed to the *_mem_kb fields below -- confirm the intended
        # unit conversion.
        mem = container['Config'].get('Memory', 0)

        # NOTE(ewindisch): cgroups/lxc defaults to 1024 multiplier.
        # see: _get_cpu_shares for further explanation
        num_cpu = container['Config'].get('CpuShares', 0) / 1024

        # FIXME(ewindisch): Improve use of statistics:
        # For 'mem', we should expose memory.stat.rss, and
        # for cpu_time we should expose cpuacct.stat.system,
        # but these aren't yet exposed by Docker.
        #
        # Also see:
        # docker/docs/sources/articles/runmetrics.md
        info = hardware.InstanceInfo(
            max_mem_kb=mem,
            mem_kb=mem,
            num_cpu=num_cpu,
            cpu_time_ns=0,
            state=(power_state.RUNNING if running
                   else power_state.SHUTDOWN)
        )
        return info
|
||||
|
||||
def get_host_stats(self, refresh=False):
|
||||
hostname = socket.gethostname()
|
||||
stats = self.get_available_resource(hostname)
|
||||
stats['host_hostname'] = stats['hypervisor_hostname']
|
||||
stats['host_name_label'] = stats['hypervisor_hostname']
|
||||
return stats
|
||||
|
||||
def get_available_nodes(self, refresh=False):
|
||||
hostname = socket.gethostname()
|
||||
return [hostname]
|
||||
|
||||
def get_available_resource(self, nodename):
|
||||
if not hasattr(self, '_nodename'):
|
||||
self._nodename = nodename
|
||||
if nodename != self._nodename:
|
||||
LOG.error(_('Hostname has changed from %(old)s to %(new)s. '
|
||||
'A restart is required to take effect.'
|
||||
), {'old': self._nodename,
|
||||
'new': nodename})
|
||||
|
||||
memory = hostinfo.get_memory_usage()
|
||||
disk = hostinfo.get_disk_usage()
|
||||
stats = {
|
||||
'vcpus': hostinfo.get_total_vcpus(),
|
||||
'vcpus_used': hostinfo.get_vcpus_used(self.list_instances(True)),
|
||||
'memory_mb': memory['total'] / units.Mi,
|
||||
'memory_mb_used': memory['used'] / units.Mi,
|
||||
'local_gb': disk['total'] / units.Gi,
|
||||
'local_gb_used': disk['used'] / units.Gi,
|
||||
'disk_available_least': disk['available'] / units.Gi,
|
||||
'hypervisor_type': 'docker',
|
||||
'hypervisor_version': versionutils.convert_version_to_int('1.0'),
|
||||
'hypervisor_hostname': self._nodename,
|
||||
'cpu_info': '?',
|
||||
'numa_topology': None,
|
||||
'supported_instances': [
|
||||
(arch.I686, hv_type.DOCKER, vm_mode.EXE),
|
||||
(arch.X86_64, hv_type.DOCKER, vm_mode.EXE)
|
||||
]
|
||||
}
|
||||
return stats
|
||||
|
||||
def _find_container_pid(self, container_id):
|
||||
n = 0
|
||||
while True:
|
||||
# NOTE(samalba): We wait for the process to be spawned inside the
|
||||
# container in order to get the the "container pid". This is
|
||||
# usually really fast. To avoid race conditions on a slow
|
||||
# machine, we allow 10 seconds as a hard limit.
|
||||
if n > 20:
|
||||
return
|
||||
info = self.docker.inspect_container(container_id)
|
||||
if info:
|
||||
pid = info['State']['Pid']
|
||||
# Pid is equal to zero if it isn't assigned yet
|
||||
if pid:
|
||||
return pid
|
||||
time.sleep(0.5)
|
||||
n += 1
|
||||
|
||||
def _get_memory_limit_bytes(self, instance):
|
||||
if isinstance(instance, objects.Instance):
|
||||
return instance.get_flavor().memory_mb * units.Mi
|
||||
else:
|
||||
system_meta = utils.instance_sys_meta(instance)
|
||||
return int(system_meta.get(
|
||||
'instance_type_memory_mb', 0)) * units.Mi
|
||||
|
||||
def _get_image_name(self, context, instance, image):
|
||||
fmt = image.container_format
|
||||
if fmt != 'docker':
|
||||
msg = _('Image container format not supported ({0})')
|
||||
raise exception.InstanceDeployFailure(msg.format(fmt),
|
||||
instance_id=instance['name'])
|
||||
return image.name
|
||||
|
||||
def _pull_missing_image(self, context, image_meta, instance):
    """Load the instance's image into the local docker daemon.

    First tries a pre-exported repository file from the configured
    shared directory; if absent (or loading fails), downloads the image
    from glance into a temporary snapshot file and loads that instead.

    :returns: docker inspect data for the loaded image
    :raises nova.exception.NovaException: if the image cannot be loaded
    """
    msg = 'Image name "%s" does not exist, fetching it...'
    LOG.debug(msg, image_meta.name)

    shared_directory = CONF.docker.shared_directory
    if (shared_directory and
            os.path.exists(os.path.join(shared_directory,
                                        image_meta.id))):
        LOG.debug('Found %s in shared_directory', image_meta.id)
        try:
            LOG.debug('Loading repository file into docker %s',
                      self._encode_utf8(image_meta.name))
            self.docker.load_repository_file(
                self._encode_utf8(image_meta.name),
                os.path.join(shared_directory, image_meta.id))
            return self.docker.inspect_image(
                self._encode_utf8(image_meta.name))
        except Exception as e:
            # If failed to load image from shared_directory, continue
            # to download the image from glance then load.
            LOG.warning(_('Cannot load repository file from shared '
                          'directory: %s'),
                        e, instance=instance, exc_info=True)

    # TODO(imain): It would be nice to do this with file like object
    # passing but that seems a bit complex right now.
    snapshot_directory = CONF.docker.snapshots_directory
    fileutils.ensure_tree(snapshot_directory)
    with utils.tempdir(dir=snapshot_directory) as tmpdir:
        try:
            # Random file name so concurrent fetches cannot collide.
            out_path = os.path.join(tmpdir, uuid.uuid4().hex)

            LOG.debug('Fetching image with id %s from glance',
                      image_meta.id)
            images.fetch(context, image_meta.id, out_path)
            LOG.debug('Loading repository file into docker %s',
                      self._encode_utf8(image_meta.name))
            self.docker.load_repository_file(
                self._encode_utf8(image_meta.name),
                out_path
            )
            return self.docker.inspect_image(
                self._encode_utf8(image_meta.name))
        except Exception as e:
            LOG.warning(_('Cannot load repository file: %s'),
                        e, instance=instance, exc_info=True)
            msg = _('Cannot load repository file: {0}')
            raise exception.NovaException(msg.format(e),
                                          instance_id=image_meta.name)
|
||||
|
||||
def _create_instance_file(self, id, name, data):
    """Write *data* to a per-instance file and return its host path.

    The directory and file are made private (0700/0600) since they hold
    per-instance network configuration.
    """
    base_dir = os.path.join(CONF.instances_path, id)
    fileutils.ensure_tree(base_dir)
    path = os.path.join(base_dir, name)
    with open(path, 'a') as f:
        f.write(data)
    os.chmod(base_dir, 0o700)
    os.chmod(path, 0o600)
    return path
|
||||
|
||||
def _cleanup_instance_file(self, id):
    """Remove the per-instance file directory, logging on failure."""
    instance_dir = os.path.join(CONF.instances_path, id)
    if not os.path.exists(instance_dir):
        return
    LOG.info(_LI('Deleting instance files %s'), instance_dir)
    try:
        shutil.rmtree(instance_dir)
    except OSError as e:
        # Best effort only: leftover files are logged, not fatal.
        LOG.error(_LE('Failed to cleanup directory %(target)s: '
                      '%(e)s'), {'target': instance_dir, 'e': e})
|
||||
|
||||
def _neutron_failed_callback(self, event_name, instance):
    """Handle a neutron VIF-plug failure event.

    Logs the failure and, when vif_plugging_is_fatal is set, aborts the
    operation by raising VirtualInterfaceCreateException.
    """
    LOG.error(_LE('Neutron Reported failure on event '
                  '%(event)s for instance %(uuid)s'),
              {'event': event_name, 'uuid': instance.uuid},
              instance=instance)
    if CONF.vif_plugging_is_fatal:
        raise exception.VirtualInterfaceCreateException()
|
||||
|
||||
def _get_neutron_events(self, network_info):
|
||||
# NOTE(danms): We need to collect any VIFs that are currently
|
||||
# down that we expect a down->up event for. Anything that is
|
||||
# already up will not undergo that transition, and for
|
||||
# anything that might be stale (cache-wise) assume it's
|
||||
# already up so we don't block on it.
|
||||
return [('network-vif-plugged', vif['id'])
|
||||
for vif in network_info if vif.get('active', True) is False]
|
||||
|
||||
def _start_container(self, container_id, instance, network_info=None):
    """Start the container and wire up its networking.

    When neutron is in use and a vif_plugging_timeout is configured,
    waits for the 'network-vif-plugged' events while plugging and
    attaching VIFs.  On a fatal networking failure the container is
    killed and removed so nothing half-configured is left behind.
    """
    self.docker.start(container_id)

    if not network_info:
        return
    timeout = CONF.vif_plugging_timeout
    if (utils.is_neutron() and timeout):
        events = self._get_neutron_events(network_info)
    else:
        events = []

    try:
        with self.virtapi.wait_for_instance_event(
                instance, events, deadline=timeout,
                error_callback=self._neutron_failed_callback):
            self.plug_vifs(instance, network_info)
            self._attach_vifs(instance, network_info)
    except eventlet.timeout.Timeout:
        LOG.warn(_LW('Timeout waiting for vif plugging callback for '
                     'instance %(uuid)s'), {'uuid': instance['name']})
        # Only fatal when configured so; otherwise continue best-effort.
        if CONF.vif_plugging_is_fatal:
            self.docker.kill(container_id)
            self.docker.remove_container(container_id, force=True)
            raise exception.InstanceDeployFailure(
                'Timeout waiting for vif plugging',
                instance_id=instance['name'])
    except (Exception) as e:
        LOG.warning(_('Cannot setup network: %s'),
                    e, instance=instance, exc_info=True)
        msg = _('Cannot setup network: {0}')
        # Any other networking error is fatal: remove the container.
        self.docker.kill(container_id)
        self.docker.remove_container(container_id, force=True)
        raise exception.InstanceDeployFailure(msg.format(e),
                                              instance_id=instance['name'])
|
||||
|
||||
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None,
          flavor=None):
    """Create and start a docker container backing *instance*.

    Pulls the image if it is not already present locally, creates a
    container with the flavor's memory/cpu limits and the per-instance
    resolv.conf/hostname/hosts binds, then starts it and plugs in its
    networking.
    """
    image_name = self._get_image_name(context, instance, image_meta)
    args = {
        'hostname': instance['display_name'],
        'mem_limit': self._get_memory_limit_bytes(instance),
        'cpu_shares': self._get_cpu_shares(instance),
        # Networking is attached after start via plug/attach_vifs,
        # so docker's own networking is disabled here.
        'network_disabled': True,
        'privileged': CONF.docker.privileged,
        'binds': self._get_binds(instance, network_info),
    }

    try:
        image = self.docker.inspect_image(self._encode_utf8(image_name))
    except errors.APIError:
        # Image not present locally; fetch it below.
        image = None

    if not image:
        image = self._pull_missing_image(context, image_meta, instance)

    container = self._create_container(instance, image_name, args)
    if not container:
        raise exception.InstanceDeployFailure(
            _('Cannot create container'),
            instance_id=instance['name'])

    container_id = container['Id']
    self._start_container(container_id, instance, network_info)
|
||||
|
||||
def _get_binds(self, instance, network_info):
    """Return the resolv.conf, hostname and hosts volume binds."""
    instance_uuid = instance['uuid']
    hostname = instance['display_name']
    dns = self._extract_dns_entries(network_info)
    return [
        self._get_resolvconf_bind(instance_uuid, dns),
        self._get_hostname_bind(instance_uuid, hostname),
        self._get_hosts_bind(instance_uuid, hostname),
    ]
|
||||
|
||||
def _extract_dns_entries(self, network_info):
|
||||
dns = []
|
||||
if network_info:
|
||||
for net in network_info:
|
||||
subnets = net['network'].get('subnets', [])
|
||||
for subnet in subnets:
|
||||
dns_entries = subnet.get('dns', [])
|
||||
for dns_entry in dns_entries:
|
||||
if 'address' in dns_entry:
|
||||
dns.append(dns_entry['address'])
|
||||
return dns if dns else CONF.docker.default_nameservers
|
||||
|
||||
def _get_resolvconf_bind(self, instance_id, nameservers):
    """Create a resolv.conf file and return its read-only bind string."""
    lines = ['nameserver %(server)s' % {'server': s} for s in nameservers]
    host_src = self._create_instance_file(instance_id, 'resolv.conf',
                                          '\n'.join(lines))
    return '%(host_src)s:/etc/resolv.conf:ro' % {'host_src': host_src}
|
||||
|
||||
def _get_hostname_bind(self, instance_id, hostname):
    """Create a hostname file and return its read-only bind string."""
    host_src = self._create_instance_file(instance_id, 'hostname',
                                          hostname)
    return '%(host_src)s:/etc/hostname:ro' % {'host_src': host_src}
|
||||
|
||||
def _get_hosts_bind(self, instance_id, hostname):
    """Create an /etc/hosts file and return its read-only bind string."""
    # Standard loopback entries plus the instance's own hostname.
    data = ('127.0.0.1 localhost ' + hostname + '\n'
            '::1 localhost ip6-localhost ip6-loopback\n'
            'fe00::0 ip6-localnet\n'
            'ff00::0 ip6-mcastprefix\n'
            'ff02::1 ip6-allnodes\n'
            'ff02::2 ip6-allrouters\n')
    host_src = self._create_instance_file(instance_id, 'hosts', data)
    return '%(host_src)s:/etc/hosts:ro' % {'host_src': host_src}
|
||||
|
||||
def restore(self, instance):
    """Restore a soft-deleted instance by restarting its container."""
    container_id = self._get_container_id(instance)
    if not container_id:
        return

    self._start_container(container_id, instance)
|
||||
|
||||
def _stop(self, container_id, instance, timeout=5):
    """Stop the container, transparently unpausing it if required.

    Docker refuses to stop a paused container; when it reports that
    condition we unpause and retry, otherwise the error is re-raised.
    """
    try:
        # Give docker at least 5 seconds to stop the container cleanly.
        self.docker.stop(container_id, max(timeout, 5))
    except errors.APIError as e:
        if 'Unpause the container before stopping' not in e.explanation:
            LOG.warning(_('Cannot stop container: %s'),
                        e, instance=instance, exc_info=True)
            raise
        self.docker.unpause(container_id)
        self.docker.stop(container_id, timeout)
|
||||
|
||||
def soft_delete(self, instance):
    """Stop the container backing *instance*, if one exists."""
    container_id = self._get_container_id(instance)
    if container_id:
        self._stop(container_id, instance)
|
||||
|
||||
def destroy(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True, migrate_data=None):
    """Stop and remove the container, then clean up its resources."""
    self.soft_delete(instance)
    container_id = self._get_container_id(instance)
    if container_id:
        self.docker.remove_container(container_id, force=True)
    # Always run cleanup so network namespaces/VIFs are released even
    # when the container is already gone.
    self.cleanup(context, instance, network_info,
                 block_device_info, destroy_disks)
|
||||
|
||||
def cleanup(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True, migrate_data=None, destroy_vifs=True):
    """Cleanup after instance being destroyed by Hypervisor."""
    container_id = self._get_container_id(instance)
    if not container_id:
        # Container already gone: still unplug VIFs so no stale ports
        # are left behind, but skip the netns/file cleanup.
        self.unplug_vifs(instance, network_info)
        return
    network.teardown_network(container_id)
    self.unplug_vifs(instance, network_info)
    self._cleanup_instance_file(instance['uuid'])
|
||||
|
||||
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Reboot the instance by stopping and restarting its container.

    Network plumbing is torn down before the restart and re-created
    afterwards.  Networking failures are logged and swallowed, leaving
    the container in whatever state it reached.
    """
    container_id = self._get_container_id(instance)
    if not container_id:
        return
    self._stop(container_id, instance)
    try:
        network.teardown_network(container_id)
        if network_info:
            self.unplug_vifs(instance, network_info)
    except Exception as e:
        LOG.warning(_('Cannot destroy the container network'
                      ' during reboot {0}').format(e),
                    exc_info=True)
        # Without a clean teardown the restart would fail; give up here.
        return

    self.docker.start(container_id)
    try:
        if network_info:
            self.plug_vifs(instance, network_info)
            self._attach_vifs(instance, network_info)
    except Exception as e:
        LOG.warning(_('Cannot setup network on reboot: {0}'), e,
                    exc_info=True)
        return
|
||||
|
||||
def power_on(self, context, instance, network_info,
             block_device_info=None):
    """Start the container and re-plumb its networking.

    Mirrors the failure handling of _start_container(): if the network
    cannot be set up, the container is killed and removed and
    InstanceDeployFailure is raised.
    """
    container_id = self._get_container_id(instance)
    if not container_id:
        return
    self.docker.start(container_id)
    if not network_info:
        return
    try:
        self.plug_vifs(instance, network_info)
        self._attach_vifs(instance, network_info)
    except Exception as e:
        # Log at warning level (was debug): this failure aborts the
        # operation, matching the identical path in _start_container().
        LOG.warning(_('Cannot setup network: %s'),
                    e, instance=instance, exc_info=True)
        msg = _('Cannot setup network: {0}')
        self.docker.kill(container_id)
        self.docker.remove_container(container_id, force=True)
        raise exception.InstanceDeployFailure(msg.format(e),
                                              instance_id=instance['name'])
|
||||
|
||||
def power_off(self, instance, timeout=0, retry_interval=0):
    """Stop the container, honouring the requested *timeout*."""
    container_id = self._get_container_id(instance)
    if container_id:
        self._stop(container_id, instance, timeout)
|
||||
|
||||
def pause(self, instance):
    """Pause the specified instance.

    :param instance: nova.objects.instance.Instance
    :raises nova.exception.NovaException: if the container cannot be
        paused
    """
    try:
        cont_id = self._get_container_id(instance)
        # A falsy return from docker.pause() is treated as failure.
        # NOTE(review): raising the exception class (no parentheses)
        # is valid Python — it is instantiated with no arguments.
        if not self.docker.pause(cont_id):
            raise exception.NovaException
    except Exception as e:
        LOG.debug(_('Error pause container: %s'),
                  e, instance=instance, exc_info=True)
        msg = _('Cannot pause container: {0}')
        raise exception.NovaException(msg.format(e),
                                      instance_id=instance['name'])
|
||||
|
||||
def unpause(self, instance):
    """Unpause paused VM instance.

    :param instance: nova.objects.instance.Instance
    :raises nova.exception.NovaException: if the container cannot be
        unpaused
    """
    try:
        cont_id = self._get_container_id(instance)
        # A falsy return from docker.unpause() is treated as failure.
        if not self.docker.unpause(cont_id):
            raise exception.NovaException
    except Exception as e:
        LOG.debug(_('Error unpause container: %s'),
                  e, instance=instance, exc_info=True)
        msg = _('Cannot unpause container: {0}')
        raise exception.NovaException(msg.format(e),
                                      instance_id=instance['name'])
|
||||
|
||||
def _get_cpu_shares(self, instance):
    """Return the docker CpuShares value for the instance's flavor.

    Docker/lxc supports relative CPU allocation; cgroups specifies:

        /sys/fs/cgroup/lxc/cpu.shares = 1024
        /sys/fs/cgroup/cpu.shares = 1024

    so we multiply the flavor vcpu count by 1024.  This keeps the CPU
    split fair with containers started outside Nova (e.g. a docker
    registry), whose CpuShares default to zero.
    """
    if isinstance(instance, objects.Instance):
        flavor = instance.get_flavor()
    else:
        # Legacy dict-style instance.
        flavor = flavors.extract_flavor(instance)
    vcpus = int(flavor['vcpus'])
    return vcpus * 1024
|
||||
|
||||
def _create_container(self, instance, image_name, args):
    """Create (but do not start) a docker container for *instance*.

    hostname/cpu_shares/network_disabled are popped for the container
    itself; everything remaining in *args* goes into the host config.
    """
    create_kwargs = {
        'name': self._encode_utf8(self._get_container_name(instance)),
        'hostname': args.pop('hostname', None),
        'cpu_shares': args.pop('cpu_shares', None),
        'network_disabled': args.pop('network_disabled', False),
    }
    create_kwargs['host_config'] = self.docker.create_host_config(**args)
    return self.docker.create_container(image_name, **create_kwargs)
|
||||
|
||||
def get_host_uptime(self):
    """Return the compute host's uptime string."""
    return hostutils.sys_uptime()
|
74
nova/virt/docker/hostinfo.py
Normal file
74
nova/virt/docker/hostinfo.py
Normal file
@ -0,0 +1,74 @@
|
||||
# Copyright (c) 2013 dotCloud, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import multiprocessing
|
||||
import os
|
||||
|
||||
from oslo_config import cfg
|
||||
import psutil
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def statvfs():
    """Return psutil disk usage for the docker root directory.

    Falls back to '/' when the configured docker root does not exist.
    """
    path = CONF.docker.root_directory
    if not os.path.exists(path):
        path = '/'
    return psutil.disk_usage(path)
|
||||
|
||||
|
||||
def get_disk_usage():
    """Return total/available/used bytes of the docker storage disk."""
    # This is the location where Docker stores its containers. It's
    # currently hardcoded in Docker so it's not configurable yet.
    usage = statvfs()
    return {
        'total': usage.total,
        'available': usage.free,
        'used': usage.used,
    }
|
||||
|
||||
|
||||
def get_total_vcpus():
    """Return the number of logical CPUs on this host."""
    return multiprocessing.cpu_count()
|
||||
|
||||
|
||||
def get_vcpus_used(containers):
    """Return the number of vcpus allocated to *containers*.

    Each container created by the driver gets flavor_vcpus * 1024
    CpuShares (see DockerDriver._get_cpu_shares), so dividing by 1024
    recovers the vcpu count.  Non-dict entries are ignored.

    :param containers: docker inspect dicts (other values are skipped)
    :returns: integer vcpu count
    """
    total_vcpus_used = 0
    for container in containers:
        if isinstance(container, dict):
            # Use floor division: on Python 3 '/' is true division and
            # would produce a float where an integer count is expected.
            total_vcpus_used += container.get('Config', {}).get(
                'CpuShares', 0) // 1024

    return total_vcpus_used
|
||||
|
||||
|
||||
def get_memory_usage():
    """Return total and used physical memory in bytes."""
    mem = psutil.virtual_memory()
    return {
        'total': mem.total,
        'used': mem.used,
    }
|
||||
|
||||
|
||||
def get_mounts():
    """Return the lines of /proc/mounts (Linux only)."""
    with open('/proc/mounts') as f:
        return f.readlines()
|
||||
|
||||
|
||||
def get_cgroup_devices_path():
    """Return the mount point of the cgroup 'devices' controller.

    Returns None when no such mount is found.
    """
    # /proc/mounts fields: <dev> <mountpoint> <fstype> <options> ...
    for mount_line in get_mounts():
        fields = mount_line.split(' ')
        is_cgroup = fields[2] == 'cgroup'
        if is_cgroup and 'devices' in fields[3].split(','):
            return fields[1]
|
71
nova/virt/docker/network.py
Normal file
71
nova/virt/docker/network.py
Normal file
@ -0,0 +1,71 @@
|
||||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_log import log
|
||||
|
||||
from nova import exception
|
||||
from nova import utils
|
||||
|
||||
from nova.i18n import _
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def teardown_network(container_id):
    """Delete the network namespace named after *container_id*.

    No-op on Windows.  Failures of the 'ip netns' commands are logged
    and swallowed (best effort).
    """
    if os.name == 'nt':
        return

    try:
        output, err = utils.execute('ip', '-o', 'netns', 'list')
        for line in output.split('\n'):
            # Only delete the namespace if it actually exists.
            if container_id == line.strip():
                utils.execute('ip', 'netns', 'delete', container_id,
                              run_as_root=True)
                break
    except processutils.ProcessExecutionError:
        LOG.warning(_('Cannot remove network namespace, netns id: %s'),
                    container_id)
|
||||
|
||||
|
||||
def find_fixed_ips(instance, network_info):
    """Return the instance's fixed IPs in CIDR (addr/prefix) notation.

    :raises nova.exception.InstanceDeployFailure: when no fixed IP is
        found in any subnet.
    """
    ips = []
    for subnet in network_info['subnets']:
        prefix_len = subnet['cidr'].split('/')[1]
        ips.extend(ip['address'] + "/" + prefix_len
                   for ip in subnet['ips']
                   if ip['type'] == 'fixed' and ip['address'])
    if not ips:
        raise exception.InstanceDeployFailure(_('Cannot find fixed ip'),
                                              instance_id=instance['uuid'])
    return ips
|
||||
|
||||
|
||||
def find_gateways(instance, network_info):
    """Return the gateway address of every subnet of the instance.

    :raises nova.exception.InstanceDeployFailure: when there are no
        subnets (and hence no gateways).
    """
    gateways = [subnet['gateway']['address']
                for subnet in network_info['subnets']]
    if not gateways:
        raise exception.InstanceDeployFailure(_('Cannot find gateway'),
                                              instance_id=instance['uuid'])
    return gateways
|
||||
|
||||
|
||||
# NOTE(arosen) - this method should be removed after it's moved into the
# linux_net code in nova.
def get_ovs_interfaceid(vif):
    """Prefer the explicit OVS interface id, falling back to the VIF id."""
    return vif.get('ovs_interfaceid') or vif['id']
|
177
nova/virt/docker/opencontrail.py
Normal file
177
nova/virt/docker/opencontrail.py
Normal file
@ -0,0 +1,177 @@
|
||||
# Copyright (C) 2014 Juniper Networks, Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import eventlet
|
||||
|
||||
from contrail_vrouter_api.vrouter_api import ContrailVRouterApi
|
||||
|
||||
from nova.network import linux_net
|
||||
from nova import utils
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_service import loopingcall
|
||||
|
||||
from nova.i18n import _
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class OpenContrailVIFDriver(object):
    """VIF driver that wires container interfaces to a Contrail vRouter."""

    def __init__(self):
        self._vrouter_semaphore = eventlet.semaphore.Semaphore()
        self._vrouter_client = ContrailVRouterApi(
            doconnect=True, semaphore=self._vrouter_semaphore)
        # Periodically verify the vRouter connection is still alive.
        timer = loopingcall.FixedIntervalLoopingCall(self._keep_alive)
        timer.start(interval=2)

    def _keep_alive(self):
        self._vrouter_client.periodic_connection_check()

    def plug(self, instance, vif):
        """Create the host-side veth pair for *vif*.

        Rolls back (deletes the pair) on failure.
        """
        vif_type = vif['type']

        LOG.debug('Plug vif_type=%(vif_type)s instance=%(instance)s '
                  'vif=%(vif)s',
                  {'vif_type': vif_type, 'instance': instance,
                   'vif': vif})

        if_local_name = 'veth%s' % vif['id'][:8]
        if_remote_name = 'ns%s' % vif['id'][:8]

        # Device already exists so return.
        if linux_net.device_exists(if_local_name):
            return
        undo_mgr = utils.UndoManager()

        try:
            utils.execute('ip', 'link', 'add', if_local_name, 'type', 'veth',
                          'peer', 'name', if_remote_name, run_as_root=True)
            undo_mgr.undo_with(lambda: utils.execute(
                'ip', 'link', 'delete', if_local_name, run_as_root=True))

            utils.execute('ip', 'link', 'set', if_remote_name, 'address',
                          vif['address'], run_as_root=True)

        except Exception:
            LOG.exception("Failed to configure network")
            msg = _('Failed to setup the network, rolling back')
            undo_mgr.rollback_and_reraise(msg=msg, instance=instance)

    def attach(self, instance, vif, container_id):
        """Move the veth peer into the container netns, register the port
        with the vRouter and configure its addresses and default routes.
        """
        vif_type = vif['type']

        LOG.debug('Attach vif_type=%(vif_type)s instance=%(instance)s '
                  'vif=%(vif)s',
                  {'vif_type': vif_type, 'instance': instance,
                   'vif': vif})

        if_local_name = 'veth%s' % vif['id'][:8]
        if_remote_name = 'ns%s' % vif['id'][:8]

        undo_mgr = utils.UndoManager()
        undo_mgr.undo_with(lambda: utils.execute(
            'ip', 'link', 'delete', if_local_name, run_as_root=True))
        ipv4_address, ipv4_netmask, ipv4_gateway = self._retrieve_ip_address(
            vif, 4)
        ipv6_address, ipv6_netmask, ipv6_gateway = self._retrieve_ip_address(
            vif, 6)
        # The vRouter API expects a concrete value even with no IPv4.
        ipv4_address = ipv4_address or '0.0.0.0'
        params = {
            'ip_address': ipv4_address,
            'vn_id': vif['network']['id'],
            'display_name': instance['display_name'],
            'hostname': instance['hostname'],
            'host': instance['host'],
            'vm_project_id': instance['project_id'],
            'port_type': 'NovaVMPort',
            'ip6_address': ipv6_address,
        }

        try:
            utils.execute('ip', 'link', 'set', if_remote_name, 'netns',
                          container_id, run_as_root=True)

            result = self._vrouter_client.add_port(
                instance['uuid'], vif['id'],
                if_local_name, vif['address'], **params)
            if not result:
                # follow the exception path
                raise RuntimeError('add_port returned %s' % str(result))
            utils.execute('ip', 'link', 'set', if_local_name, 'up',
                          run_as_root=True)
        except Exception:
            LOG.exception("Failed to attach the network")
            msg = _('Failed to attach the network, rolling back')
            undo_mgr.rollback_and_reraise(msg=msg, instance=instance)

        # Address/route configuration inside the container namespace is
        # best effort: failures are logged but not rolled back.
        try:
            utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link',
                          'set', if_remote_name, 'address', vif['address'],
                          run_as_root=True)
            if ipv6_address:
                ip = ipv6_address + "/" + ipv6_netmask
                gateway = ipv6_gateway
                utils.execute('ip', 'netns', 'exec', container_id, 'ifconfig',
                              if_remote_name, 'inet6', 'add', ip,
                              run_as_root=True)
                utils.execute('ip', 'netns', 'exec', container_id, 'ip', '-6',
                              'route', 'replace', 'default', 'via', gateway,
                              'dev', if_remote_name, run_as_root=True)
            if ipv4_address != '0.0.0.0':
                ip = ipv4_address + "/" + ipv4_netmask
                gateway = ipv4_gateway
                utils.execute('ip', 'netns', 'exec', container_id, 'ifconfig',
                              if_remote_name, ip, run_as_root=True)
                utils.execute('ip', 'netns', 'exec', container_id, 'ip',
                              'route', 'replace', 'default', 'via', gateway,
                              'dev', if_remote_name, run_as_root=True)
            utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link',
                          'set', if_remote_name, 'up', run_as_root=True)
        except Exception:
            LOG.exception(_("Failed to attach vif"), instance=instance)

    def _retrieve_ip_address(self, vif, version):
        """Return (address, netmask, gateway) for the given IP *version*.

        Returns (None, None, None) when the vif has no matching subnet.
        Only the first IP of each subnet is examined.
        """
        address = None
        netmask = None
        gateway = None
        if 'subnets' in vif['network']:
            subnets = vif['network']['subnets']
            for subnet in subnets:
                ips = subnet['ips'][0]
                if (ips['version'] == version):
                    if ips['address'] is not None:
                        address = ips['address']
                        netmask = subnet['cidr'].split('/')[1]
                        gateway = subnet['gateway']['address']

        return address, netmask, gateway

    def unplug(self, instance, vif):
        """Delete the vRouter port and the host-side veth device.

        Failures are logged and swallowed (best effort).
        """
        vif_type = vif['type']
        if_local_name = 'veth%s' % vif['id'][:8]

        LOG.debug('Unplug vif_type=%(vif_type)s instance=%(instance)s '
                  'vif=%(vif)s',
                  {'vif_type': vif_type, 'instance': instance,
                   'vif': vif})

        try:
            self._vrouter_client.delete_port(vif['id'])
            if linux_net.device_exists(if_local_name):
                utils.execute('ip', 'link', 'delete', if_local_name,
                              run_as_root=True)
        except Exception:
            LOG.exception(_("Delete port failed"), instance=instance)
|
492
nova/virt/docker/vifs.py
Normal file
492
nova/virt/docker/vifs.py
Normal file
@ -0,0 +1,492 @@
|
||||
# Copyright (C) 2013 VMware, Inc
|
||||
# Copyright 2011 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_log import log as logging
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.network import linux_net
|
||||
from nova.network import manager
|
||||
from nova.network import model as network_model
|
||||
from nova import utils
|
||||
from nova.virt.docker import network
|
||||
from oslo_config import cfg
|
||||
import random
|
||||
|
||||
# We need config opts from manager, but pep8 complains, this silences it.
|
||||
assert manager
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('my_ip', 'nova.conf.netconf')
|
||||
CONF.import_opt('vlan_interface', 'nova.manager')
|
||||
CONF.import_opt('flat_interface', 'nova.manager')
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DockerGenericVIFDriver(object):
|
||||
|
||||
def plug(self, instance, vif):
    """Dispatch host-side VIF plumbing based on the vif type.

    :raises nova.exception.NovaException: when the vif type is missing
        or not supported by this driver.
    """
    vif_type = vif['type']

    LOG.debug('plug vif_type=%(vif_type)s instance=%(instance)s '
              'vif=%(vif)s',
              {'vif_type': vif_type, 'instance': instance,
               'vif': vif})

    if vif_type is None:
        raise exception.NovaException(
            _("vif_type parameter must be present "
              "for this vif_driver implementation"))

    if vif_type == network_model.VIF_TYPE_BRIDGE:
        self.plug_bridge(instance, vif)
    elif vif_type == network_model.VIF_TYPE_OVS:
        # Hybrid plug puts a linux bridge between the container and the
        # OVS integration bridge when required.
        if self.ovs_hybrid_required(vif):
            self.plug_ovs_hybrid(instance, vif)
        else:
            self.plug_ovs(instance, vif)
    elif vif_type == network_model.VIF_TYPE_MIDONET:
        self.plug_midonet(instance, vif)
    elif vif_type == network_model.VIF_TYPE_IOVISOR:
        self.plug_iovisor(instance, vif)
    elif vif_type == 'hyperv':
        self.plug_windows(instance, vif)
    else:
        raise exception.NovaException(
            _("Unexpected vif_type=%s") % vif_type)
|
||||
|
||||
def plug_windows(self, instance, vif):
    # No host-side plumbing is performed for the 'hyperv' vif type.
    pass
|
||||
|
||||
def plug_iovisor(self, instance, vif):
    """Plug docker vif into IOvisor

    Creates a port on IOvisor and onboards the interface
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]

    iface_id = vif['id']
    net_id = vif['network']['id']
    tenant_id = instance['project_id']

    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()

    try:
        # veth pair: local end goes to IOvisor, remote end later moves
        # into the container namespace.
        utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type',
                      'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        utils.execute('ifc_ctl', 'gateway', 'add_port', if_local_name,
                      run_as_root=True)
        utils.execute('ifc_ctl', 'gateway', 'ifup', if_local_name,
                      'access_vm',
                      vif['network']['label'] + "_" + iface_id,
                      vif['address'], 'pgtag2=%s' % net_id,
                      'pgtag1=%s' % tenant_id, run_as_root=True)
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)

    except Exception:
        LOG.exception("Failed to configure network on IOvisor")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
|
||||
|
||||
def plug_ovs(self, instance, vif):
    """Create a veth pair and attach its local end to the OVS bridge."""
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    bridge = vif['network']['bridge']

    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()

    try:
        utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type',
                      'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        linux_net.create_ovs_vif_port(bridge, if_local_name,
                                      network.get_ovs_interfaceid(vif),
                                      vif['address'],
                                      instance['uuid'])
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
|
||||
|
||||
def plug_midonet(self, instance, vif):
    """Plug into MidoNet's network port

    This accomplishes binding of the vif to a MidoNet virtual port
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    port_id = network.get_ovs_interfaceid(vif)

    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return

    undo_mgr = utils.UndoManager()
    try:
        utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type',
                      'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        undo_mgr.undo_with(lambda: utils.execute(
            'ip', 'link', 'delete', if_local_name, run_as_root=True))
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
        utils.execute('mm-ctl', '--bind-port', port_id, if_local_name,
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
|
||||
|
||||
def plug_ovs_hybrid(self, instance, vif):
    """Plug using hybrid strategy

    Create a per-VIF linux bridge, then link that bridge to the OVS
    integration bridge via a veth device, setting up the other end
    of the veth device just like a normal OVS port.  Then create the
    tap/ns veth pair for the container and attach the tap end to the
    linux bridge.

    Every step that creates a host resource registers a matching undo
    action so a failure part-way through rolls everything back.
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    iface_id = self.get_ovs_interfaceid(vif)
    br_name = self.get_br_name(vif['id'])
    v1_name, v2_name = self.get_veth_pair_names(vif['id'])

    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()

    try:
        if not linux_net.device_exists(br_name):
            utils.execute('brctl', 'addbr', br_name, run_as_root=True)
            # In case of failure, undo the steps.  Deleting the bridge
            # will delete all associated resources.
            undo_mgr.undo_with(lambda: utils.execute(
                'brctl', 'delbr', br_name, run_as_root=True))

            utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
            utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
            # Disable multicast snooping; tolerate exit code 1 when the
            # sysfs knob is absent.
            utils.execute('tee',
                          ('/sys/class/net/%s/bridge/multicast_snooping' %
                           br_name),
                          process_input='0',
                          run_as_root=True,
                          check_exit_code=[0, 1])

        if not linux_net.device_exists(v2_name):
            linux_net._create_veth_pair(v1_name, v2_name)
            undo_mgr.undo_with(lambda: utils.execute(
                'ip', 'link', 'delete', v1_name, run_as_root=True))

        utils.execute('ip', 'link', 'set', br_name, 'up',
                      run_as_root=True)
        undo_mgr.undo_with(lambda: utils.execute('ip', 'link', 'set',
                                                 br_name, 'down',
                                                 run_as_root=True))

        # Deleting/Undoing the interface will delete all
        # associated resources (remove from the bridge, its
        # pair, etc...)
        utils.execute('brctl', 'addif', br_name, v1_name,
                      run_as_root=True)

        linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
                                      v2_name,
                                      iface_id, vif['address'],
                                      instance['uuid'])
        undo_mgr.undo_with(
            lambda: utils.execute('ovs-vsctl', 'del-port',
                                  self.get_bridge_name(vif),
                                  v2_name, run_as_root=True))

        utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type',
                      'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        undo_mgr.undo_with(
            lambda: utils.execute('ip', 'link', 'delete', if_local_name,
                                  run_as_root=True))

        # Deleting/Undoing the interface will delete all
        # associated resources (remove from the bridge, its pair, etc...)
        utils.execute('brctl', 'addif', br_name, if_local_name,
                      run_as_root=True)
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
    except Exception:
        msg = "Failed to configure Network." \
              " Rolling back the network interfaces %s %s %s %s " % (
                  br_name, if_local_name, v1_name, v2_name)
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
|
||||
|
||||
# We are creating our own mac's now because the linux bridge interface
|
||||
# takes on the lowest mac that is assigned to it. By using FE range
|
||||
# mac's we prevent the interruption and possible loss of networking
|
||||
# from changing mac addresses.
|
||||
def _fe_random_mac(self):
|
||||
mac = [0xfe, 0xed,
|
||||
random.randint(0x00, 0xff),
|
||||
random.randint(0x00, 0xff),
|
||||
random.randint(0x00, 0xff),
|
||||
random.randint(0x00, 0xff)]
|
||||
return ':'.join(map(lambda x: "%02x" % x, mac))
|
||||
|
||||
def plug_bridge(self, instance, vif):
    """Plug the VIF into a linux bridge network.

    Ensures the (optionally VLAN-tagged) bridge exists, then creates
    the tap/ns veth pair, gives the tap end a generated fe:ed MAC and
    attaches it to the bridge.
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    bridge = vif['network']['bridge']
    gateway = network.find_gateway(instance, vif['network'])

    net = vif['network']
    if net.get_meta('should_create_vlan', False):
        # BUG FIX: a stray trailing comma previously made `vlan` a
        # one-element tuple, so ensure_vlan_bridge received a tuple
        # instead of the vlan id.
        vlan = net.get_meta('vlan')
        iface = (CONF.vlan_interface or
                 vif['network'].get_meta('bridge_interface'))
        linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
            vlan,
            bridge,
            iface,
            net_attrs=vif,
            mtu=vif.get('mtu'))
        iface = 'vlan%s' % vlan
    else:
        iface = (CONF.flat_interface or
                 vif['network'].get_meta('bridge_interface'))
        LOG.debug('Ensuring bridge for %s - %s' % (iface, bridge))
        linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
            bridge,
            iface,
            net_attrs=vif,
            gateway=gateway)

    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()

    try:
        utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type',
                      'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        undo_mgr.undo_with(lambda: utils.execute(
            'ip', 'link', 'delete', if_local_name, run_as_root=True))
        # NOTE(samalba): Deleting the interface will delete all
        # associated resources (remove from the bridge, its pair, etc...)
        utils.execute('ip', 'link', 'set', if_local_name, 'address',
                      self._fe_random_mac(), run_as_root=True)
        utils.execute('brctl', 'addif', bridge, if_local_name,
                      run_as_root=True)
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
|
||||
|
||||
def unplug(self, instance, vif):
    """Dispatch unplugging to the handler matching the vif's type.

    :raises exception.NovaException: when the vif type is missing or
        not supported by this driver.
    """
    vif_type = vif['type']

    LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
              'vif=%(vif)s',
              {'vif_type': vif_type, 'instance': instance,
               'vif': vif})

    if vif_type is None:
        raise exception.NovaException(
            _("vif_type parameter must be present "
              "for this vif_driver implementation"))

    if vif_type == network_model.VIF_TYPE_OVS:
        # Hybrid plugging needs the per-VIF linux bridge torn down too.
        if self.ovs_hybrid_required(vif):
            self.unplug_ovs_hybrid(instance, vif)
        else:
            self.unplug_ovs(instance, vif)
    elif vif_type == network_model.VIF_TYPE_BRIDGE:
        self.unplug_bridge(instance, vif)
    elif vif_type == network_model.VIF_TYPE_MIDONET:
        self.unplug_midonet(instance, vif)
    elif vif_type == network_model.VIF_TYPE_IOVISOR:
        self.unplug_iovisor(instance, vif)
    elif vif_type == 'hyperv':
        self.unplug_windows(instance, vif)
    else:
        raise exception.NovaException(
            _("Unexpected vif_type=%s") % vif_type)
|
||||
|
||||
def unplug_windows(self, instance, vif):
    """No-op: no host-side teardown is performed for 'hyperv' vifs."""
    pass
|
||||
|
||||
def unplug_iovisor(self, instance, vif):
    """Unplug vif from IOvisor

    Offboards the interface from IOvisor, deletes its port and removes
    the host-side tap device.  Failures of the external commands are
    logged and swallowed (best-effort teardown).
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    iface_id = vif['id']
    try:
        # Take the access-vm gateway interface down in IOvisor.
        utils.execute('ifc_ctl', 'gateway', 'ifdown',
                      if_local_name, 'access_vm',
                      vif['network']['label'] + "_" + iface_id,
                      vif['address'], run_as_root=True)
        utils.execute('ifc_ctl', 'gateway', 'del_port', if_local_name,
                      run_as_root=True)
        linux_net.delete_net_dev(if_local_name)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
|
||||
|
||||
def unplug_ovs(self, instance, vif):
    """Unplug the VIF by deleting the port from the bridge."""
    bridge_name = vif['network']['bridge']
    device_name = vif['devname']
    try:
        linux_net.delete_ovs_vif_port(bridge_name, device_name)
    except processutils.ProcessExecutionError:
        # Best-effort: a failed external command is logged, not raised.
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
|
||||
|
||||
def unplug_midonet(self, instance, vif):
    """Unplug into MidoNet's network port

    This accomplishes unbinding of the vif from its MidoNet virtual
    port via ``mm-ctl``.  Command failures are logged and swallowed.
    """
    try:
        utils.execute('mm-ctl', '--unbind-port',
                      network.get_ovs_interfaceid(vif), run_as_root=True)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
|
||||
|
||||
def unplug_ovs_hybrid(self, instance, vif):
    """UnPlug using hybrid strategy

    Unhook port from OVS, unhook port from bridge, delete
    bridge, and delete both veth devices.
    """
    try:
        br_name = self.get_br_name(vif['id'])
        v1_name, v2_name = self.get_veth_pair_names(vif['id'])

        # Tear down the per-VIF linux bridge first: detach the veth,
        # bring the bridge down, then remove it.
        if linux_net.device_exists(br_name):
            utils.execute('brctl', 'delif', br_name, v1_name,
                          run_as_root=True)
            utils.execute('ip', 'link', 'set', br_name, 'down',
                          run_as_root=True)
            utils.execute('brctl', 'delbr', br_name,
                          run_as_root=True)

        # Removing the OVS port also deletes the veth pair's other end.
        linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
                                      v2_name)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
|
||||
|
||||
def unplug_bridge(self, instance, vif):
    """No-op: the veth is removed from the bridge when it is deleted."""
    # NOTE(arosen): nothing has to be done in the linuxbridge case
    # as when the veth is deleted it automatically is removed from
    # the bridge.
    pass
|
||||
|
||||
def attach(self, instance, vif, container_id):
    """Move the ns-end of the veth pair into the container's network
    namespace and configure it: MAC, fixed IPs, link up, optional MTU,
    default route(s), and TSO off.

    :param instance: instance the vif belongs to
    :param vif: vif dict describing the port
    :param container_id: netns name of the target container
    """
    vif_type = vif['type']
    if_remote_name = 'ns%s' % vif['id'][:11]
    gateways = network.find_gateways(instance, vif['network'])
    ips = network.find_fixed_ips(instance, vif['network'])

    LOG.debug('attach vif_type=%(vif_type)s instance=%(instance)s '
              'vif=%(vif)s',
              {'vif_type': vif_type, 'instance': instance,
               'vif': vif})

    try:
        utils.execute('ip', 'link', 'set', if_remote_name, 'netns',
                      container_id, run_as_root=True)
        utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link',
                      'set', if_remote_name, 'address', vif['address'],
                      run_as_root=True)
        for ip in ips:
            utils.execute('ip', 'netns', 'exec', container_id, 'ip',
                          'addr', 'add', ip, 'dev', if_remote_name,
                          run_as_root=True)
        utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link',
                      'set', if_remote_name, 'up', run_as_root=True)

        # Setup MTU on if_remote_name only if it is a non-default value.
        # (Simplified from a redundant None-guarded assignment chain.)
        mtu = vif.get('mtu')
        if mtu is not None:
            utils.execute('ip', 'netns', 'exec', container_id, 'ip',
                          'link', 'set', if_remote_name, 'mtu', mtu,
                          run_as_root=True)

        for gateway in gateways:
            utils.execute('ip', 'netns', 'exec', container_id,
                          'ip', 'route', 'replace', 'default', 'via',
                          gateway, 'dev', if_remote_name, run_as_root=True)

        # Disable TSO, for now no config option
        utils.execute('ip', 'netns', 'exec', container_id, 'ethtool',
                      '--offload', if_remote_name, 'tso', 'off',
                      run_as_root=True)

    except Exception:
        # NOTE(review): failures are logged but deliberately not
        # re-raised, so callers may see a partially configured
        # container — confirm this is the intended contract.
        LOG.exception("Failed to attach vif")
|
||||
|
||||
def get_bridge_name(self, vif):
    """Return the name of the bridge backing this vif's network."""
    network_info = vif['network']
    return network_info['bridge']
|
||||
|
||||
def get_ovs_interfaceid(self, vif):
    """Return the OVS interface id, falling back to the vif id."""
    interface_id = vif.get('ovs_interfaceid')
    if interface_id:
        return interface_id
    return vif['id']
|
||||
|
||||
def get_br_name(self, iface_id):
    """Return the qbr bridge name for *iface_id*, length-limited."""
    name = "qbr%s" % iface_id
    return name[:network_model.NIC_NAME_LEN]
|
||||
|
||||
def get_veth_pair_names(self, iface_id):
    """Return the (qvb, qvo) veth pair names for *iface_id*."""
    limit = network_model.NIC_NAME_LEN
    qvb = ("qvb%s" % iface_id)[:limit]
    qvo = ("qvo%s" % iface_id)[:limit]
    return (qvb, qvo)
|
||||
|
||||
def ovs_hybrid_required(self, vif):
    """Return whether the OVS hybrid plug strategy must be used."""
    if self.get_firewall_required(vif):
        return True
    return self.get_hybrid_plug_enabled(vif)
|
||||
|
||||
def get_firewall_required(self, vif):
    """Return True when nova's firewall driver must filter this port."""
    # A port with port_filter enabled is filtered by the network
    # backend itself, so no nova-side firewall is needed.
    if vif.get('details'):
        enabled = vif['details'].get('port_filter', False)
        if enabled:
            return False
    # Any firewall driver other than the no-op one implies filtering.
    if CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver":
        return True
    return False
|
||||
|
||||
def get_hybrid_plug_enabled(self, vif):
    """Return whether the port requests hybrid (qbr) plugging."""
    details = vif.get('details')
    if not details:
        return False
    return details.get('ovs_hybrid_plug', False)
|
35
nova/virt/hostutils.py
Normal file
35
nova/virt/hostutils.py
Normal file
@ -0,0 +1,35 @@
|
||||
# Copyright (c) 2014 Docker, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import ctypes
|
||||
import datetime
|
||||
import os
|
||||
import time
|
||||
|
||||
from nova import utils
|
||||
|
||||
|
||||
def sys_uptime():
    """Return the host uptime as an ``uptime``-style string.

    On Windows the value is synthesized from GetTickCount64; elsewhere
    the output of the ``uptime`` command is returned verbatim.
    """
    if os.name == 'nt':
        tick_count64 = ctypes.windll.kernel32.GetTickCount64()
        # BUG FIX: `long()` does not exist on Python 3; int() handles
        # arbitrary-size integers on both Python 2 and 3.
        return ("%s up %s, 0 users, load average: 0, 0, 0" %
                (str(time.strftime("%H:%M:%S")),
                 str(datetime.timedelta(milliseconds=int(tick_count64)))))
    else:
        # LANG=C keeps the output parseable regardless of host locale.
        out, err = utils.execute('env', 'LANG=C', 'uptime')
        return out
|
@ -11,6 +11,7 @@ pbr>=1.8 # Apache-2.0
|
||||
pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD
|
||||
python-etcd>=0.4.3 # MIT License
|
||||
python-glanceclient>=2.5.0 # Apache-2.0
|
||||
python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0
|
||||
oslo.i18n>=2.1.0 # Apache-2.0
|
||||
oslo.log>=3.11.0 # Apache-2.0
|
||||
oslo.concurrency>=3.8.0 # Apache-2.0
|
||||
|
@ -13,6 +13,7 @@
|
||||
# under the License.
|
||||
|
||||
from glanceclient import client as glanceclient
|
||||
from novaclient import client as novaclient
|
||||
from oslo_log import log as logging
|
||||
|
||||
from zun.common import exception
|
||||
@ -29,6 +30,7 @@ class OpenStackClients(object):
|
||||
self.context = context
|
||||
self._keystone = None
|
||||
self._glance = None
|
||||
self._nova = None
|
||||
|
||||
def url_for(self, **kwargs):
|
||||
return self.keystone().session.get_endpoint(**kwargs)
|
||||
@ -83,3 +85,14 @@ class OpenStackClients(object):
|
||||
self._glance = glanceclient.Client(glanceclient_version, **args)
|
||||
|
||||
return self._glance
|
||||
|
||||
@exception.wrap_keystone_exception
def nova(self):
    """Return a cached novaclient instance, creating it on first use."""
    if self._nova:
        return self._nova

    # API version comes from the [nova_client] config group; auth is
    # delegated to the shared keystone session.
    nova_api_version = self._get_client_option('nova', 'api_version')
    session = self.keystone().session
    self._nova = novaclient.Client(nova_api_version, session=session)

    return self._nova
|
||||
|
@ -368,3 +368,17 @@ class DockerError(ZunException):
|
||||
|
||||
class PollTimeOut(ZunException):
    """Raised when a polling loop gives up waiting for its condition."""
    message = _("Polling request timed out.")
|
||||
|
||||
|
||||
class ServerInError(ZunException):
    """Raised when a Nova server goes to an error status."""
    message = _('Went to status %(resource_status)s due to '
                '"%(status_reason)s"')
|
||||
|
||||
|
||||
class ServerUnknownStatus(ZunException):
    """Raised when a Nova server reports a status Zun does not know."""
    message = _('%(result)s - Unknown status %(resource_status)s due to '
                '"%(status_reason)s"')
|
||||
|
||||
|
||||
class EntityNotFound(ZunException):
    """Raised when a named entity cannot be located."""
    message = _("The %(entity)s (%(name)s) could not be found.")
|
||||
|
250
zun/common/nova.py
Normal file
250
zun/common/nova.py
Normal file
@ -0,0 +1,250 @@
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import requests
|
||||
import six
|
||||
|
||||
from novaclient import exceptions
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from zun.common import clients
|
||||
from zun.common import exception
|
||||
from zun.common.i18n import _
|
||||
from zun.common.i18n import _LW
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def retry_if_connection_err(exception):
|
||||
return isinstance(exception, requests.ConnectionError)
|
||||
|
||||
|
||||
class NovaClient(object):
    """Thin wrapper around python-novaclient used by Zun to manage
    sandbox servers: creation, status polling, deletion, and address
    lookup.
    """

    # Statuses that mean "still transitioning, keep polling" rather
    # than definitive success or failure.
    deferred_server_statuses = ['BUILD',
                                'HARD_REBOOT',
                                'PASSWORD',
                                'REBOOT',
                                'RESCUE',
                                'RESIZE',
                                'REVERT_RESIZE',
                                'SHUTOFF',
                                'SUSPENDED',
                                'VERIFY_RESIZE']

    def __init__(self, context):
        # Request context used to build the novaclient session lazily.
        self.context = context
        self._client = None

    def client(self):
        """Return the cached novaclient, creating it on first use."""
        if not self._client:
            self._client = clients.OpenStackClients(self.context).nova()
        return self._client

    def is_not_found(self, ex):
        """Return True if *ex* is a novaclient NotFound exception."""
        return isinstance(ex, exceptions.NotFound)

    def create_server(self, name, image, flavor, **kwargs):
        """Create a Nova server, resolving image and flavor by name or id.

        :returns: the created server object
        """
        image = self.get_image_by_name_or_id(image)
        flavor = self.get_flavor_by_name_or_id(flavor)
        return self.client().servers.create(name, image, flavor, **kwargs)

    def get_image_by_name_or_id(self, image_ident):
        """Get an image by name or ID.

        :raises exception.ImageNotFound: no image matches
        :raises exception.Conflict: more than one image matches the name
        """
        try:
            return self.client().glance.find_image(image_ident)
        except exceptions.NotFound as e:
            raise exception.ImageNotFound(six.text_type(e))
        except exceptions.NoUniqueMatch as e:
            raise exception.Conflict(six.text_type(e))

    def get_flavor_by_name_or_id(self, flavor_ident):
        """Get the flavor object for the specified flavor name or id.

        :param flavor_identifier: the name or id of the flavor to find
        :returns: a flavor object with name or id :flavor:
        """
        try:
            flavor = self.client().flavors.get(flavor_ident)
        except exceptions.NotFound:
            # Not a flavor id; fall back to a lookup by name.
            flavor = self.client().flavors.find(name=flavor_ident)

        return flavor

    def fetch_server(self, server_id):
        """Fetch fresh server object from Nova.

        Log warnings and return None for non-critical API errors.
        Use this method in various ``check_*_complete`` resource methods,
        where intermittent errors can be tolerated.
        """
        server = None
        try:
            server = self.client().servers.get(server_id)
        except exceptions.OverLimit as exc:
            LOG.warning(_LW("Received an OverLimit response when "
                            "fetching server (%(id)s) : %(exception)s"),
                        {'id': server_id,
                         'exception': exc})
        except exceptions.ClientException as exc:
            # Tolerate transient 500/503 errors; anything else is fatal.
            if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
                 (500, 503))):
                LOG.warning(_LW("Received the following exception when "
                                "fetching server (%(id)s) : %(exception)s"),
                            {'id': server_id,
                             'exception': exc})
            else:
                raise
        return server

    def refresh_server(self, server):
        """Refresh server's attributes.

        Also log warnings for non-critical API errors.
        """
        try:
            server.get()
        except exceptions.OverLimit as exc:
            LOG.warning(_LW("Server %(name)s (%(id)s) received an OverLimit "
                            "response during server.get(): %(exception)s"),
                        {'name': server.name,
                         'id': server.id,
                         'exception': exc})
        except exceptions.ClientException as exc:
            # Same transient-error tolerance as fetch_server().
            if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
                 (500, 503))):
                LOG.warning(_LW('Server "%(name)s" (%(id)s) received the '
                                'following exception during server.get(): '
                                '%(exception)s'),
                            {'name': server.name,
                             'id': server.id,
                             'exception': exc})
            else:
                raise

    def get_status(self, server):
        """Return the server's status.

        :param server: server object
        :returns: status as a string
        """
        # Some clouds append extra (STATUS) strings to the status, strip it
        return server.status.split('(')[0]

    def check_active(self, server):
        """Check server status.

        Accepts both server IDs and server objects.
        Returns True if server is ACTIVE,
        raises errors when server has an ERROR or unknown to Zun status,
        returns False otherwise.

        """
        # not checking with is_uuid_like as most tests use strings e.g. '1234'
        if isinstance(server, six.string_types):
            server = self.fetch_server(server)
            if server is None:
                return False
            else:
                status = self.get_status(server)
        else:
            status = self.get_status(server)
            if status != 'ACTIVE':
                # Re-read the server in case the status just changed.
                self.refresh_server(server)
                status = self.get_status(server)

        if status in self.deferred_server_statuses:
            return False
        elif status == 'ACTIVE':
            return True
        elif status == 'ERROR':
            fault = getattr(server, 'fault', {})
            raise exception.ServerInError(
                resource_status=status,
                status_reason=_("Message: %(message)s, Code: %(code)s") % {
                    'message': fault.get('message', _('Unknown')),
                    'code': fault.get('code', _('Unknown'))
                })
        else:
            raise exception.ServerUnknownStatus(
                resource_status=server.status,
                status_reason=_('Unknown'),
                result=_('Server is not active'))

    def delete_server(self, server):
        """Request deletion of *server* (id or name); return its id."""
        server_id = self.get_server_id(server, raise_on_error=False)
        if server_id:
            self.client().servers.delete(server_id)
        return server_id

    def check_delete_server_complete(self, server_id):
        """Wait for server to disappear from Nova."""
        try:
            server = self.fetch_server(server_id)
        except Exception as exc:
            # A NotFound means the delete has completed.
            self.ignore_not_found(exc)
            return True
        if not server:
            return False
        task_state_in_nova = getattr(server, 'OS-EXT-STS:task_state', None)
        # the status of server won't change until the delete task has done
        if task_state_in_nova == 'deleting':
            return False

        status = self.get_status(server)
        if status in ("DELETED", "SOFT_DELETED"):
            return True
        if status == 'ERROR':
            fault = getattr(server, 'fault', {})
            message = fault.get('message', 'Unknown')
            code = fault.get('code')
            errmsg = _("Server %(name)s delete failed: (%(code)s) "
                       "%(message)s") % dict(name=server.name,
                                             code=code,
                                             message=message)
            raise exception.ServerInError(resource_status=status,
                                          status_reason=errmsg)
        return False

    @excutils.exception_filter
    def ignore_not_found(self, ex):
        """Raises the exception unless it is a not-found."""
        return self.is_not_found(ex)

    def get_addresses(self, server):
        """Return the server's IP address, fetching it from Nova."""
        try:
            server_id = self.get_server_id(server)
            server = self.client().servers.get(server_id)
        except exceptions.NotFound as ex:
            # Returns None implicitly when the instance is gone.
            LOG.warning(_LW('Instance (%(server)s) not found: %(ex)s'),
                        {'server': server, 'ex': ex})
        else:
            return server.addresses

    def get_server_id(self, server, raise_on_error=True):
        """Resolve *server* (uuid or unique name) to a server id.

        :raises exception.ZunException: name lookup fails (when
            ``raise_on_error``) or *server* is not a string.
        """
        if uuidutils.is_uuid_like(server):
            return server
        elif isinstance(server, six.string_types):
            servers = self.client().servers.list(search_opts={'name': server})
            if len(servers) == 1:
                return servers[0].id

            if raise_on_error:
                raise exception.ZunException(_(
                    "Unable to get server id with name %s") % server)
        else:
            raise exception.ZunException(_("Unexpected server type"))
|
@ -18,10 +18,12 @@
|
||||
import eventlet
|
||||
import functools
|
||||
import mimetypes
|
||||
from oslo_utils import uuidutils
|
||||
import time
|
||||
|
||||
from oslo_context import context as common_context
|
||||
from oslo_log import log as logging
|
||||
from oslo_service import loopingcall
|
||||
from oslo_utils import uuidutils
|
||||
import pecan
|
||||
import six
|
||||
|
||||
@ -143,6 +145,36 @@ def check_container_id(function):
|
||||
return decorated_function
|
||||
|
||||
|
||||
def poll_until(retriever, condition=lambda value: value,
               sleep_time=1, time_out=None, success_msg=None,
               timeout_msg=None):
    """Retrieves object until it passes condition, then returns it.

    :param retriever: zero-argument callable producing the polled object
    :param condition: predicate applied to each retrieved object
    :param sleep_time: seconds between polls
    :param time_out: overall timeout in seconds; ``None`` polls forever
    :param success_msg: message logged at INFO on success
    :param timeout_msg: message logged at ERROR on timeout
    :returns: the first retrieved object passing ``condition``
    :raises exception.PollTimeOut: once ``time_out`` has elapsed
    """
    start_time = time.time()

    def poll_and_check():
        obj = retriever()
        if condition(obj):
            # Stop the looping call and hand the value back via wait().
            raise loopingcall.LoopingCallDone(retvalue=obj)
        if time_out is not None and time.time() - start_time > time_out:
            raise exception.PollTimeOut

    try:
        poller = loopingcall.FixedIntervalLoopingCall(
            f=poll_and_check).start(sleep_time, initial_delay=False)
        # BUG FIX: the retrieved object was previously discarded even
        # though the docstring promises it is returned.
        obj = poller.wait()
        LOG.info(success_msg)
        return obj
    except exception.PollTimeOut:
        LOG.error(timeout_msg)
        raise
    except Exception:
        LOG.exception(_("Unexpected exception occurred."))
        raise
||||
|
||||
|
||||
def get_image_pull_policy(image_pull_policy, image_tag):
|
||||
if not image_pull_policy:
|
||||
if image_tag == 'latest':
|
||||
|
@ -21,6 +21,7 @@ from zun.conf import database
|
||||
from zun.conf import docker
|
||||
from zun.conf import glance_client
|
||||
from zun.conf import image_driver
|
||||
from zun.conf import nova_client
|
||||
from zun.conf import path
|
||||
from zun.conf import services
|
||||
from zun.conf import zun_client
|
||||
@ -34,6 +35,7 @@ database.register_opts(CONF)
|
||||
docker.register_opts(CONF)
|
||||
glance_client.register_opts(CONF)
|
||||
image_driver.register_opts(CONF)
|
||||
nova_client.register_opts(CONF)
|
||||
path.register_opts(CONF)
|
||||
services.register_opts(CONF)
|
||||
zun_client.register_opts(CONF)
|
||||
|
@ -20,6 +20,7 @@ driver_opts = [
|
||||
Possible values:
|
||||
|
||||
* ``docker.driver.DockerDriver``
|
||||
* ``docker.driver.NovaDockerDriver``
|
||||
|
||||
Services which consume this:
|
||||
|
||||
@ -29,6 +30,8 @@ Interdependencies to other options:
|
||||
|
||||
* None
|
||||
"""),
|
||||
cfg.IntOpt('default_sleep_time', default=1,
|
||||
help='Time to sleep (in seconds) during waiting for an event.'),
|
||||
cfg.IntOpt('default_timeout', default=60 * 10,
|
||||
help='Maximum time (in seconds) to wait for an event.'),
|
||||
]
|
||||
|
53
zun/conf/nova_client.py
Normal file
53
zun/conf/nova_client.py
Normal file
@ -0,0 +1,53 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
#
|
||||
# Copyright © 2012 eNovance <licensing@enovance.com>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from zun.common.i18n import _
|
||||
|
||||
|
||||
# Config group holding all [nova_client] options.
nova_group = cfg.OptGroup(name='nova_client',
                          title='Options for the Nova client')

# TLS/SSL options mirroring the other *_client option groups.
common_security_opts = [
    cfg.StrOpt('ca_file',
               help=_('Optional CA cert file to use in SSL connections.')),
    cfg.StrOpt('cert_file',
               help=_('Optional PEM-formatted certificate chain file.')),
    cfg.StrOpt('key_file',
               help=_('Optional PEM-formatted file that contains the '
                      'private key.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate will not "
                       "be verified."))]

nova_client_opts = [
    cfg.StrOpt('api_version',
               default='2.37',
               help=_('Version of Nova API to use in novaclient.'))]


ALL_OPTS = (nova_client_opts + common_security_opts)


def register_opts(conf):
    """Register the [nova_client] group and its options on *conf*."""
    conf.register_group(nova_group)
    conf.register_opts(ALL_OPTS, group=nova_group)


def list_opts():
    """Return options keyed by group, for oslo.config sample generation."""
    return {nova_group: ALL_OPTS}
|
@ -16,7 +16,12 @@ import six
|
||||
from docker import errors
|
||||
from oslo_log import log as logging
|
||||
|
||||
from zun.common import exception
|
||||
from zun.common.i18n import _LE
|
||||
from zun.common.i18n import _LI
|
||||
from zun.common.i18n import _LW
|
||||
from zun.common import nova
|
||||
from zun.common import utils
|
||||
from zun.common.utils import check_container_id
|
||||
import zun.conf
|
||||
from zun.container.docker import utils as docker_utils
|
||||
@ -61,6 +66,7 @@ class DockerDriver(driver.ContainerDriver):
|
||||
% (image, name))
|
||||
|
||||
kwargs = {
|
||||
'name': self.get_container_name(container),
|
||||
'hostname': container.hostname,
|
||||
'command': container.command,
|
||||
'environment': container.environment,
|
||||
@ -230,7 +236,10 @@ class DockerDriver(driver.ContainerDriver):
|
||||
container.meta['sandbox_id'] = id
|
||||
|
||||
def get_sandbox_name(self, container):
|
||||
return 'sandbox-' + container.uuid
|
||||
return 'zun-sandbox-' + container.uuid
|
||||
|
||||
def get_container_name(self, container):
|
||||
return 'zun-' + container.uuid
|
||||
|
||||
def get_addresses(self, context, container):
|
||||
sandbox_id = self.get_sandbox_id(container)
|
||||
@ -245,3 +254,85 @@ class DockerDriver(driver.ContainerDriver):
|
||||
],
|
||||
}
|
||||
return addresses
|
||||
|
||||
|
||||
class NovaDockerDriver(DockerDriver):
|
||||
def create_sandbox(self, context, container, key_name=None,
                   flavor='m1.small', image='kubernetes/pause',
                   nics='auto'):
    """Provision a sandbox for *container* as a Nova server.

    The flavor and image are currently hardcoded defaults
    (kubernetes/pause is a minimal always-running image); nics='auto'
    lets Nova/Neutron pick the networking.

    :returns: the id of the local docker container backing the server
    """
    name = self.get_sandbox_name(container)
    novaclient = nova.NovaClient(context)
    sandbox = novaclient.create_server(name=name, image=image,
                                       flavor=flavor, key_name=key_name,
                                       nics=nics)
    # Block until Nova reports the server ACTIVE before looking up the
    # docker container it spawned.
    self._ensure_active(novaclient, sandbox)
    sandbox_id = self._find_container_by_server_name(name)
    return sandbox_id
|
||||
|
||||
def _ensure_active(self, novaclient, server, timeout=300):
    """Wait until the Nova instance becomes active.

    :raises exception.PollTimeOut: server not active within the timeout
    """
    def _check_active():
        return novaclient.check_active(server)

    success_msg = _LI("Created server %s successfully.") % server.id
    timeout_msg = _LE("Failed to create server %s. Timeout waiting for "
                      "server to become active.") % server.id
    # NOTE(review): with timeout defaulting to 300, CONF.default_timeout
    # only applies when a caller explicitly passes a falsy timeout.
    utils.poll_until(_check_active,
                     sleep_time=CONF.default_sleep_time,
                     time_out=timeout or CONF.default_timeout,
                     success_msg=success_msg, timeout_msg=timeout_msg)
|
||||
|
||||
def delete_sandbox(self, context, sandbox_id):
|
||||
novaclient = nova.NovaClient(context)
|
||||
server_name = self._find_server_by_container_id(sandbox_id)
|
||||
if not server_name:
|
||||
LOG.warning(_LW("Cannot find server name for sandbox %s") %
|
||||
sandbox_id)
|
||||
return
|
||||
|
||||
server_id = novaclient.delete_server(server_name)
|
||||
self._ensure_deleted(novaclient, server_id)
|
||||
|
||||
def _ensure_deleted(self, novaclient, server_id, timeout=300):
|
||||
'''Wait until the Nova instance to be deleted.'''
|
||||
def _check_delete_complete():
|
||||
return novaclient.check_delete_server_complete(server_id)
|
||||
|
||||
success_msg = _LI("Delete server %s successfully.") % server_id
|
||||
timeout_msg = _LE("Failed to create server %s. Timeout waiting for "
|
||||
"server to be deleted.") % server_id
|
||||
utils.poll_until(_check_delete_complete,
|
||||
sleep_time=CONF.default_sleep_time,
|
||||
time_out=timeout or CONF.default_timeout,
|
||||
success_msg=success_msg, timeout_msg=timeout_msg)
|
||||
|
||||
def get_addresses(self, context, container):
|
||||
novaclient = nova.NovaClient(context)
|
||||
sandbox_id = self.get_sandbox_id(container)
|
||||
if sandbox_id:
|
||||
server_name = self._find_server_by_container_id(sandbox_id)
|
||||
if server_name:
|
||||
# TODO(hongbin): Standardize the format of addresses
|
||||
return novaclient.get_addresses(server_name)
|
||||
else:
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
|
||||
def _find_container_by_server_name(self, name):
|
||||
with docker_utils.docker_client() as docker:
|
||||
for info in docker.list_instances(inspect=True):
|
||||
if info['Config'].get('Hostname') == name:
|
||||
return info['Id']
|
||||
raise exception.ZunException(_(
|
||||
"Cannot find container with name %s") % name)
|
||||
|
||||
def _find_server_by_container_id(self, container_id):
|
||||
with docker_utils.docker_client() as docker:
|
||||
try:
|
||||
info = docker.inspect_container(container_id)
|
||||
return info['Config'].get('Hostname')
|
||||
except errors.APIError as e:
|
||||
if e.response.status_code != 404:
|
||||
raise
|
||||
return None
|
||||
|
@ -126,6 +126,10 @@ class ContainerDriver(object):
|
||||
"""Retrieve sandbox name."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_container_name(self, container):
|
||||
"""Retrieve sandbox name."""
|
||||
raise NotImplementedError()
|
||||
|
||||
    def get_addresses(self, context, container):
        """Retrieve IP addresses of the container.

        Driver interface method; concrete drivers must override it.
        """
        raise NotImplementedError()
|
||||
|
Loading…
Reference in New Issue
Block a user