Move NovaDocker driver to contrib

We decided to deprecate the NovaDocker driver in the team meeting [1].
Therefore, we moved the driver to the contrib folder. NovaDocker was an
experimental driver that attempted to leverage Nova to provide networking
to containers. However, now that the Kuryr integration is implemented,
this driver is no longer needed.

[1] http://eavesdrop.openstack.org/meetings/zun/2017/
    zun.2017-07-11-03.00.log.html
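
For operators, the practical effect is the [DEFAULT] container_driver
option in zun.conf: only the plain docker driver remains selectable
in-tree. A minimal sanity check, assuming devstack's usual
/etc/zun/zun.conf location (the path is an assumption; devstack writes
to whatever $ZUN_CONF points at):

    # Confirm which container driver the deployment now uses.
    grep '^container_driver' /etc/zun/zun.conf
    # Expected with this change:
    # container_driver = docker.driver.DockerDriver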

Change-Id: I770fe13a004dc56ad20fe8eca545f05c4504e48e
Author: Hongbin Lu
Date:   2017-05-28 20:35:53 -04:00
Parent: fe28a063f3
Commit: 6dc2866167
21 changed files with 60 additions and 277 deletions

@@ -13,7 +13,6 @@ enable_plugin kuryr-libnetwork https://git.openstack.org/openstack/kuryr-libnetw
#Uncomment below variables and enable nova and neutron
#services to use nova docker driver
#ZUN_DRIVER=nova-docker
#IP_VERSION=4
disable_service n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth

@@ -133,7 +133,7 @@ function configure_zun {
# upload_sandbox_image() - Upload sandbox image to glance
function upload_sandbox_image {
if [[ ${ZUN_DRIVER} == "docker" || ${ZUN_DRIVER} == "nova-docker" ]]; then
if [[ ${ZUN_DRIVER} == "docker" ]]; then
sudo docker pull kubernetes/pause
sudo docker save kubernetes/pause | openstack image create kubernetes/pause --public --container-format docker --disk-format raw
fi
@@ -183,8 +183,6 @@ function create_zun_conf {
rm -f $ZUN_CONF
if [[ ${ZUN_DRIVER} == "docker" ]]; then
iniset $ZUN_CONF DEFAULT container_driver docker.driver.DockerDriver
elif [[ ${ZUN_DRIVER} == "nova-docker" ]]; then
iniset $ZUN_CONF DEFAULT container_driver docker.driver.NovaDockerDriver
fi
if [[ ${ZUN_DB_TYPE} == "etcd" ]]; then
iniset $ZUN_CONF DEFAULT db_type etcd

@@ -1,7 +0,0 @@
# Plug-in overrides
ZUN_DRIVER=${ZUN_DRIVER:-docker}
if [[ ${ZUN_DRIVER} == "nova-docker" ]]; then
export VIRT_DRIVER=zun
fi

@@ -6,7 +6,6 @@ set -o xtrace
echo_summary "zun's plugin.sh was called..."
source $DEST/zun/devstack/lib/zun
source $DEST/zun/devstack/lib/nova
(set -o posix; set)
if is_service_enabled zun-api zun-compute; then
@@ -26,10 +25,6 @@ if is_service_enabled zun-api zun-compute; then
create_zun_accounts
fi
if [[ ${ZUN_DRIVER} == "nova-docker" ]]; then
configure_nova_docker
fi
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
# Initialize zun
init_zun

@@ -20,7 +20,6 @@ driver_opts = [
Possible values:
* ``docker.driver.DockerDriver``
* ``docker.driver.NovaDockerDriver``
Services which consume this:

@@ -23,7 +23,6 @@ from oslo_utils import timeutils
from zun.common import consts
from zun.common import exception
from zun.common.i18n import _
from zun.common import nova
from zun.common import utils
from zun.common.utils import check_container_id
import zun.conf
@@ -799,117 +798,3 @@ class DockerDriver(driver.ContainerDriver):
addresses.update(update)
container.addresses = addresses
container.save(context)
class NovaDockerDriver(DockerDriver):
capabilities = {
"support_sandbox": True,
"support_standalone": False,
}
def add_security_group(self, context, container, security_group, **kwargs):
msg = "NovaDockerDriver does not support security_groups"
raise exception.ZunException(msg)
def create_sandbox(self, context, container, key_name=None,
flavor='m1.tiny', image='kubernetes/pause',
nics='auto'):
# FIXME(hongbin): We elevate to admin privilege because the default
# policy in nova disallows non-admin users to create instance in
# specified host. This is not ideal because all nova instances will
# be created at service admin tenant now, which breaks the
# multi-tenancy model. We need to fix it.
elevated = context.elevated()
novaclient = nova.NovaClient(elevated)
name = self.get_sandbox_name(container)
if container.host != CONF.host:
raise exception.ZunException(_(
"Host mismatch: container should be created at host '%s'.") %
container.host)
# NOTE(hongbin): The format of availability zone is ZONE:HOST:NODE
# However, we just want to specify host, so it is ':HOST:'
az = ':%s:' % container.host
sandbox = novaclient.create_server(name=name, image=image,
flavor=flavor, key_name=key_name,
nics=nics, availability_zone=az)
self._ensure_active(novaclient, sandbox)
sandbox_id = self._find_container_by_server_name(name)
return sandbox_id
def _ensure_active(self, novaclient, server, timeout=300):
"""Wait until the Nova instance to become active."""
def _check_active():
return novaclient.check_active(server)
success_msg = "Created server %s successfully." % server.id
timeout_msg = ("Failed to create server %s. Timeout waiting for "
"server to become active.") % server.id
utils.poll_until(_check_active,
sleep_time=CONF.default_sleep_time,
time_out=timeout or CONF.default_timeout,
success_msg=success_msg, timeout_msg=timeout_msg)
def delete_sandbox(self, context, sandbox_id):
elevated = context.elevated()
novaclient = nova.NovaClient(elevated)
server_name = self._find_server_by_container_id(sandbox_id)
if not server_name:
LOG.warning("Cannot find server name for sandbox %s", sandbox_id)
return
server_id = novaclient.delete_server(server_name)
self._ensure_deleted(novaclient, server_id)
def stop_sandbox(self, context, sandbox_id):
elevated = context.elevated()
novaclient = nova.NovaClient(elevated)
server_name = self._find_server_by_container_id(sandbox_id)
if not server_name:
LOG.warning("Cannot find server name for sandbox %s", sandbox_id)
return
novaclient.stop_server(server_name)
def _ensure_deleted(self, novaclient, server_id, timeout=300):
"""Wait until the Nova instance to be deleted."""
def _check_delete_complete():
return novaclient.check_delete_server_complete(server_id)
success_msg = "Delete server %s successfully." % server_id
timeout_msg = ("Failed to create server %s. Timeout waiting for "
"server to be deleted.") % server_id
utils.poll_until(_check_delete_complete,
sleep_time=CONF.default_sleep_time,
time_out=timeout or CONF.default_timeout,
success_msg=success_msg, timeout_msg=timeout_msg)
def get_addresses(self, context, container):
elevated = context.elevated()
novaclient = nova.NovaClient(elevated)
sandbox_id = container.get_sandbox_id()
if sandbox_id:
server_name = self._find_server_by_container_id(sandbox_id)
if server_name:
# TODO(hongbin): Standardize the format of addresses
return novaclient.get_addresses(server_name)
else:
return None
else:
return None
def _find_container_by_server_name(self, name):
with docker_utils.docker_client() as docker:
for info in docker.list_instances(inspect=True):
if info['Config'].get('Hostname') == name:
return info['Id']
raise exception.ZunException(_(
"Cannot find container with name %s") % name)
def _find_server_by_container_id(self, container_id):
with docker_utils.docker_client() as docker:
try:
info = docker.inspect_container(container_id)
return info['Config'].get('Hostname')
except errors.APIError as e:
if e.response.status_code != 404:
raise
return None

@@ -28,9 +28,6 @@ export DEVSTACK_LOCAL_CONFIG+=$'\n'"KURYR_CONFIG_DIR=/etc/kuryr-libnetwork"
if [ "$driver" = "docker" ]; then
export DEVSTACK_LOCAL_CONFIG+=$'\n'"ZUN_DRIVER=docker"
elif [ "$driver" = "nova-docker" ]; then
export DEVSTACK_LOCAL_CONFIG+=$'\n'"ZUN_DRIVER=nova-docker"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"IP_VERSION=4"
fi
if [ "$db" = "etcd" ]; then

@@ -13,13 +13,11 @@
from docker import errors
import mock
from oslo_serialization import jsonutils
from oslo_utils import units
from zun.common import consts
from zun import conf
from zun.container.docker.driver import DockerDriver
from zun.container.docker.driver import NovaDockerDriver
from zun.container.docker import utils as docker_utils
from zun import objects
from zun.tests.unit.container import base
@@ -495,97 +493,6 @@ class TestDockerDriver(base.DriverTestCase):
requested_network[0],
security_groups=None)
class TestNovaDockerDriver(base.DriverTestCase):
def setUp(self):
super(TestNovaDockerDriver, self).setUp()
self.driver = NovaDockerDriver()
@mock.patch(
'zun.container.docker.driver.NovaDockerDriver.get_sandbox_name')
@mock.patch('zun.common.nova.NovaClient')
@mock.patch('zun.container.docker.driver.NovaDockerDriver._ensure_active')
@mock.patch('zun.container.docker.driver.'
'NovaDockerDriver._find_container_by_server_name')
def test_create_sandbox(self, mock_find_container_by_server_name,
mock_ensure_active, mock_nova_client,
mock_get_sandbox_name):
nova_client_instance = mock.MagicMock()
nova_client_instance.create_server.return_value = 'server_instance'
mock_get_sandbox_name.return_value = 'test_sanbox_name'
mock_nova_client.return_value = nova_client_instance
mock_ensure_active.return_value = True
mock_find_container_by_server_name.return_value = \
'test_container_name_id'
mock_container = obj_utils.get_test_container(self.context,
host=conf.CONF.host)
result_sandbox_id = self.driver.create_sandbox(self.context,
mock_container)
mock_get_sandbox_name.assert_called_once_with(mock_container)
nova_client_instance.create_server.assert_called_once_with(
name='test_sanbox_name', image='kubernetes/pause',
flavor='m1.tiny', key_name=None,
nics='auto', availability_zone=':{0}:'.format(conf.CONF.host))
mock_ensure_active.assert_called_once_with(nova_client_instance,
'server_instance')
mock_find_container_by_server_name.assert_called_once_with(
'test_sanbox_name')
self.assertEqual(result_sandbox_id, 'test_container_name_id')
@mock.patch('zun.common.nova.NovaClient')
@mock.patch('zun.container.docker.driver.'
'NovaDockerDriver._find_server_by_container_id')
@mock.patch('zun.container.docker.driver.NovaDockerDriver._ensure_deleted')
def test_delete_sandbox(self, mock_ensure_delete,
mock_find_server_by_container_id, mock_nova_client
):
nova_client_instance = mock.MagicMock()
nova_client_instance.delete_server.return_value = 'delete_server_id'
mock_nova_client.return_value = nova_client_instance
mock_find_server_by_container_id.return_value = 'test_test_server_name'
mock_ensure_delete.return_value = True
self.driver.delete_sandbox(self.context, sandbox_id='test_sandbox_id')
mock_find_server_by_container_id.assert_called_once_with(
'test_sandbox_id')
nova_client_instance.delete_server.assert_called_once_with(
'test_test_server_name')
mock_ensure_delete.assert_called_once_with(nova_client_instance,
'delete_server_id')
@mock.patch('zun.common.nova.NovaClient')
@mock.patch('zun.container.docker.driver.'
'NovaDockerDriver._find_server_by_container_id')
def test_stop_sandbox(self, mock_find_server_by_container_id,
mock_nova_client):
nova_client_instance = mock.MagicMock()
nova_client_instance.stop_server.return_value = 'stop_server_id'
mock_nova_client.return_value = nova_client_instance
mock_find_server_by_container_id.return_value = 'test_test_server_name'
self.driver.stop_sandbox(self.context, sandbox_id='test_sandbox_id')
mock_find_server_by_container_id.assert_called_once_with(
'test_sandbox_id')
nova_client_instance.stop_server.assert_called_once_with(
'test_test_server_name')
@mock.patch('zun.container.docker.driver.'
'NovaDockerDriver._find_server_by_container_id')
@mock.patch('zun.common.nova.NovaClient')
def test_get_addresses(self, mock_nova_client,
mock_find_server_by_container_id):
nova_client_instance = mock.MagicMock()
nova_client_instance.get_addresses.return_value = 'test_address'
mock_nova_client.return_value = nova_client_instance
mock_find_server_by_container_id.return_value = 'test_test_server_name'
mock_container = mock.MagicMock()
mock_container.get_sandbox_id.return_value = 'test_sanbox_id'
result_address = self.driver.get_addresses(self.context,
mock_container)
mock_find_server_by_container_id.assert_called_once_with(
'test_sanbox_id')
nova_client_instance.get_addresses.assert_called_once_with(
'test_test_server_name')
self.assertEqual(result_address, 'test_address')
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('zun.container.driver.ContainerDriver.get_host_mem')
@mock.patch(
@@ -620,52 +527,3 @@ class TestNovaDockerDriver(base.DriverTestCase):
self.assertEqual('CentOS', node_obj.os)
self.assertEqual('3.10.0-123', node_obj.kernel_version)
self.assertEqual({'dev.type': 'product'}, node_obj.labels)
@mock.patch('tarfile.open')
def test_read_tar_image(self, mock_open):
fake_image = {'path': 'fake-path'}
mock_context_manager = mock.MagicMock()
mock_open.return_value = mock_context_manager
mock_file = mock.MagicMock()
mock_context_manager.__enter__.return_value = mock_file
mock_data = [{"Config": "fake_config",
"RepoTags": ["cirros:latest"],
"Layers": ["fake_layer", "fake_layer2"]}]
mock_file.extractfile.return_value.read.return_value = \
jsonutils.dumps(mock_data, separators=(',', ':'))
self.driver.read_tar_image(fake_image)
self.assertEqual('cirros', fake_image['repo'])
self.assertEqual('latest', fake_image['tag'])
@mock.patch('tarfile.open')
def test_read_tar_image_multi_tags(self, mock_open):
fake_image = {'path': 'fake-path'}
mock_context_manager = mock.MagicMock()
mock_open.return_value = mock_context_manager
mock_file = mock.MagicMock()
mock_context_manager.__enter__.return_value = mock_file
mock_data = [{"Config": "fake_config",
"RepoTags": ["cirros:latest", "cirros:0.3.4"],
"Layers": ["fake_layer", "fake_layer2"]}]
mock_file.extractfile.return_value.read.return_value = \
jsonutils.dumps(mock_data, separators=(',', ':'))
self.driver.read_tar_image(fake_image)
self.assertEqual('cirros', fake_image['repo'])
self.assertEqual('latest', fake_image['tag'])
@mock.patch('tarfile.open')
def test_read_tar_image_fail(self, mock_open):
fake_image = {'path': 'fake-path'}
mock_context_manager = mock.MagicMock()
mock_open.return_value = mock_context_manager
mock_file = mock.MagicMock()
mock_context_manager.__enter__.return_value = mock_file
mock_data = [{"Config": "fake_config"}]
mock_file.extractfile.return_value.read.return_value = \
jsonutils.dumps(mock_data, separators=(',', ':'))
self.driver.read_tar_image(fake_image)
self.assertTrue('repo' not in fake_image)
self.assertTrue('tag' not in fake_image)

@@ -0,0 +1,59 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from zun.container.docker import utils as docker_utils
from zun.tests.unit.container import base
class TestDockerHTTPClient(base.DriverTestCase):
def setUp(self):
super(TestDockerHTTPClient, self).setUp()
self.client = docker_utils.DockerHTTPClient()
@mock.patch('tarfile.open')
def test_read_tar_image(self, mock_open):
fake_image = {'path': 'fake-path'}
mock_context_manager = mock.MagicMock()
mock_open.return_value = mock_context_manager
mock_file = mock.MagicMock()
mock_context_manager.__enter__.return_value = mock_file
mock_data = [{"Config": "fake_config",
"RepoTags": ["cirros:latest"],
"Layers": ["fake_layer", "fake_layer2"]}]
mock_file.extractfile.return_value.read.return_value = \
jsonutils.dumps(mock_data, separators=(',', ':'))
self.client.read_tar_image(fake_image)
self.assertEqual('cirros', fake_image['repo'])
self.assertEqual('latest', fake_image['tag'])
@mock.patch('tarfile.open')
def test_read_tar_image_multi_tags(self, mock_open):
fake_image = {'path': 'fake-path'}
mock_context_manager = mock.MagicMock()
mock_open.return_value = mock_context_manager
mock_file = mock.MagicMock()
mock_context_manager.__enter__.return_value = mock_file
mock_data = [{"Config": "fake_config",
"RepoTags": ["cirros:latest", "cirros:0.3.4"],
"Layers": ["fake_layer", "fake_layer2"]}]
mock_file.extractfile.return_value.read.return_value = \
jsonutils.dumps(mock_data, separators=(',', ':'))
self.client.read_tar_image(fake_image)
self.assertEqual('cirros', fake_image['repo'])
self.assertEqual('latest', fake_image['tag'])