Merge trunk, resolve conflicts, and rename the 010_ migration file to 011_, since another migration file landed in trunk ahead of this one.
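For context (not part of the commit itself): sqlalchemy-migrate applies migration scripts in order of their numeric filename prefix and tracks the schema version as a single integer, so two scripts cannot share a prefix. Once another 010_ script reached trunk first, this branch's script had to move to the next free slot. A rough sketch of the resulting layout, with hypothetical filenames and assuming the usual nova/db/sqlalchemy/migrate_repo/versions/ location:

    nova/db/sqlalchemy/migrate_repo/versions/
        ...
        010_<migration that reached trunk first>.py
        011_<this branch's migration, renamed from 010_>.py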
@@ -94,7 +94,7 @@ def init_leases(interface):
    """Get the list of hosts for an interface."""
    ctxt = context.get_admin_context()
    network_ref = db.network_get_by_bridge(ctxt, interface)
    return linux_net.get_dhcp_hosts(ctxt, network_ref['id'])
    return linux_net.get_dhcp_leases(ctxt, network_ref['id'])


def main():

@@ -276,7 +276,7 @@ def _db_error(caught_exception):
    print caught_exception
    print _("The above error may show that the database has not "
            "been created.\nPlease create a database using "
            "nova-manage sync db before running this command.")
            "'nova-manage db sync' before running this command.")
    exit(1)


@@ -437,6 +437,8 @@ class ProjectCommands(object):
                    "been created.\nPlease create a database by running a "
                    "nova-api server on this host.")

AccountCommands = ProjectCommands


class FixedIpCommands(object):
    """Class for managing fixed ip."""
@@ -985,6 +987,7 @@ class ImageCommands(object):

CATEGORIES = [
    ('user', UserCommands),
    ('account', AccountCommands),
    ('project', ProjectCommands),
    ('role', RoleCommands),
    ('shell', ShellCommands),

@@ -562,7 +562,7 @@ class CloudController(object):
        if context.is_admin:
            v['status'] = '%s (%s, %s, %s, %s)' % (
                volume['status'],
                volume['user_id'],
                volume['project_id'],
                volume['host'],
                instance_data,
                volume['mountpoint'])

@@ -27,6 +27,7 @@ import webob.exc
from nova import flags
from nova import log as logging
from nova import wsgi
from nova.api.openstack import accounts
from nova.api.openstack import faults
from nova.api.openstack import backup_schedules
from nova.api.openstack import consoles
@@ -34,6 +35,7 @@ from nova.api.openstack import flavors
from nova.api.openstack import images
from nova.api.openstack import servers
from nova.api.openstack import shared_ip_groups
from nova.api.openstack import users
from nova.api.openstack import zones


@@ -77,8 +79,8 @@ class APIRouter(wsgi.Router):

            server_members['pause'] = 'POST'
            server_members['unpause'] = 'POST'
            server_members["diagnostics"] = "GET"
            server_members["actions"] = "GET"
            server_members['diagnostics'] = 'GET'
            server_members['actions'] = 'GET'
            server_members['suspend'] = 'POST'
            server_members['resume'] = 'POST'
            server_members['rescue'] = 'POST'
@@ -87,8 +89,15 @@ class APIRouter(wsgi.Router):
            server_members['inject_network_info'] = 'POST'

            mapper.resource("zone", "zones", controller=zones.Controller(),
                        collection={'detail': 'GET', 'info': 'GET'}),

            mapper.resource("user", "users", controller=users.Controller(),
                        collection={'detail': 'GET'})

            mapper.resource("account", "accounts",
                            controller=accounts.Controller(),
                            collection={'detail': 'GET'})

        mapper.resource("server", "servers", controller=servers.Controller(),
                        collection={'detail': 'GET'},
                        member=server_members)

nova/api/openstack/accounts.py (new file, 85 lines)
@@ -0,0 +1,85 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import common

from nova import exception
from nova import flags
from nova import log as logging
from nova import wsgi

from nova.auth import manager
from nova.api.openstack import faults

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.openstack')


def _translate_keys(account):
    return dict(id=account.id,
                name=account.name,
                description=account.description,
                manager=account.project_manager_id)


class Controller(wsgi.Controller):

    _serialization_metadata = {
        'application/xml': {
            "attributes": {
                "account": ["id", "name", "description", "manager"]}}}

    def __init__(self):
        self.manager = manager.AuthManager()

    def _check_admin(self, context):
        """We cannot depend on the db layer to check for admin access
           for the auth manager, so we do it here"""
        if not context.is_admin:
            raise exception.NotAuthorized(_("Not admin user."))

    def index(self, req):
        raise faults.Fault(exc.HTTPNotImplemented())

    def detail(self, req):
        raise faults.Fault(exc.HTTPNotImplemented())

    def show(self, req, id):
        """Return data about the given account id"""
        account = self.manager.get_project(id)
        return dict(account=_translate_keys(account))

    def delete(self, req, id):
        self._check_admin(req.environ['nova.context'])
        self.manager.delete_project(id)
        return {}

    def create(self, req):
        """We use update with create-or-update semantics
           because the id comes from an external source"""
        raise faults.Fault(exc.HTTPNotImplemented())

    def update(self, req, id):
        """This is really create or update."""
        self._check_admin(req.environ['nova.context'])
        env = self._deserialize(req.body, req.get_content_type())
        description = env['account'].get('description')
        manager = env['account'].get('manager')
        try:
            account = self.manager.get_project(id)
            self.manager.modify_project(id, manager, description)
        except exception.NotFound:
            account = self.manager.create_project(id, manager, description)
        return dict(account=_translate_keys(account))
@@ -28,11 +28,13 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
from nova import utils
from nova import wsgi
from nova.api.openstack import faults

LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS


@@ -50,14 +52,23 @@ class AuthMiddleware(wsgi.Middleware):
    def __call__(self, req):
        if not self.has_authentication(req):
            return self.authenticate(req)

        user = self.get_user_by_authentication(req)

        accounts = self.auth.get_projects(user=user)
        if not user:
            return faults.Fault(webob.exc.HTTPUnauthorized())

        project = self.auth.get_project(FLAGS.default_project)
        req.environ['nova.context'] = context.RequestContext(user, project)
        if accounts:
            #we are punting on this til auth is settled,
            #and possibly til api v1.1 (mdragon)
            account = accounts[0]
        else:
            return faults.Fault(webob.exc.HTTPUnauthorized())

        if not self.auth.is_admin(user) and \
           not self.auth.is_project_member(user, account):
            return faults.Fault(webob.exc.HTTPUnauthorized())

        req.environ['nova.context'] = context.RequestContext(user, account)
        return self.application

    def has_authentication(self, req):
@@ -125,14 +136,15 @@ class AuthMiddleware(wsgi.Middleware):
        """
        ctxt = context.get_admin_context()
        user = self.auth.get_user_from_access_key(key)

        if user and user.name == username:
            token_hash = hashlib.sha1('%s%s%f' % (username, key,
                time.time())).hexdigest()
            token_dict = {}
            token_dict['token_hash'] = token_hash
            token_dict['cdn_management_url'] = ''
            # Same as auth url, e.g. http://foo.org:8774/baz/v1.0
            token_dict['server_management_url'] = req.url
            os_url = req.url
            token_dict['server_management_url'] = os_url
            token_dict['storage_url'] = ''
            token_dict['user_id'] = user.id
            token = self.db.auth_token_create(ctxt, token_dict)

nova/api/openstack/users.py (new file, 93 lines)
@@ -0,0 +1,93 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import common

from nova import exception
from nova import flags
from nova import log as logging
from nova import wsgi

from nova.auth import manager

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.openstack')


def _translate_keys(user):
    return dict(id=user.id,
                name=user.name,
                access=user.access,
                secret=user.secret,
                admin=user.admin)


class Controller(wsgi.Controller):

    _serialization_metadata = {
        'application/xml': {
            "attributes": {
                "user": ["id", "name", "access", "secret", "admin"]}}}

    def __init__(self):
        self.manager = manager.AuthManager()

    def _check_admin(self, context):
        """We cannot depend on the db layer to check for admin access
           for the auth manager, so we do it here"""
        if not context.is_admin:
            raise exception.NotAuthorized(_("Not admin user"))

    def index(self, req):
        """Return all users in brief"""
        users = self.manager.get_users()
        users = common.limited(users, req)
        users = [_translate_keys(user) for user in users]
        return dict(users=users)

    def detail(self, req):
        """Return all users in detail"""
        return self.index(req)

    def show(self, req, id):
        """Return data about the given user id"""
        user = self.manager.get_user(id)
        return dict(user=_translate_keys(user))

    def delete(self, req, id):
        self._check_admin(req.environ['nova.context'])
        self.manager.delete_user(id)
        return {}

    def create(self, req):
        self._check_admin(req.environ['nova.context'])
        env = self._deserialize(req.body, req.get_content_type())
        is_admin = env['user'].get('admin') in ('T', 'True', True)
        name = env['user'].get('name')
        access = env['user'].get('access')
        secret = env['user'].get('secret')
        user = self.manager.create_user(name, access, secret, is_admin)
        return dict(user=_translate_keys(user))

    def update(self, req, id):
        self._check_admin(req.environ['nova.context'])
        env = self._deserialize(req.body, req.get_content_type())
        is_admin = env['user'].get('admin')
        if is_admin is not None:
            is_admin = is_admin in ('T', 'True', True)
        access = env['user'].get('access')
        secret = env['user'].get('secret')
        self.manager.modify_user(id, access, secret, is_admin)
        return dict(user=_translate_keys(self.manager.get_user(id)))
@@ -1,4 +1,4 @@
# Copyright 2010 OpenStack LLC.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -18,6 +18,7 @@ import common
from nova import flags
from nova import wsgi
from nova import db
from nova.scheduler import api


FLAGS = flags.FLAGS
@@ -32,6 +33,10 @@ def _filter_keys(item, keys):
    return dict((k, v) for k, v in item.iteritems() if k in keys)


def _exclude_keys(item, keys):
    return dict((k, v) for k, v in item.iteritems() if k not in keys)


def _scrub_zone(zone):
    return _filter_keys(zone, ('id', 'api_url'))

@@ -41,19 +46,30 @@ class Controller(wsgi.Controller):
    _serialization_metadata = {
        'application/xml': {
            "attributes": {
                "zone": ["id", "api_url"]}}}
                "zone": ["id", "api_url", "name", "capabilities"]}}}

    def index(self, req):
        """Return all zones in brief"""
        items = db.zone_get_all(req.environ['nova.context'])
        # Ask the ZoneManager in the Scheduler for most recent data,
        # or fall-back to the database ...
        items = api.API().get_zone_list(req.environ['nova.context'])
        if not items:
            items = db.zone_get_all(req.environ['nova.context'])

        items = common.limited(items, req)
        items = [_scrub_zone(item) for item in items]
        items = [_exclude_keys(item, ['username', 'password'])
                      for item in items]
        return dict(zones=items)

    def detail(self, req):
        """Return all zones in detail"""
        return self.index(req)

    def info(self, req):
        """Return name and capabilities for this zone."""
        return dict(zone=dict(name=FLAGS.zone_name,
                    capabilities=FLAGS.zone_capabilities))

    def show(self, req, id):
        """Return data about the given zone id"""
        zone_id = int(id)
@@ -125,6 +125,11 @@ class API(base.Base):
                raise quota.QuotaError(msg, "MetadataLimitExceeded")

        image = self.image_service.show(context, image_id)

        os_type = None
        if 'properties' in image and 'os_type' in image['properties']:
            os_type = image['properties']['os_type']

        if kernel_id is None:
            kernel_id = image['properties'].get('kernel_id', None)
        if ramdisk_id is None:
@@ -165,6 +170,7 @@ class API(base.Base):
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state': 0,
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
@@ -180,7 +186,8 @@ class API(base.Base):
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone}
            'availability_zone': availability_zone,
            'os_type': os_type}
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
@@ -34,6 +34,7 @@ from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import exists
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import literal_column

FLAGS = flags.FLAGS

@@ -647,18 +648,17 @@ def fixed_ip_disassociate(context, address):
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(_context, host, time):
    session = get_session()
    # NOTE(vish): The nested select is because sqlite doesn't support
    #             JOINs in UPDATEs.
    result = session.execute('UPDATE fixed_ips SET instance_id = NULL, '
                                                  'leased = 0 '
                             'WHERE network_id IN (SELECT id FROM networks '
                                                  'WHERE host = :host) '
                             'AND updated_at < :time '
                             'AND instance_id IS NOT NULL '
                             'AND allocated = 0',
                    {'host': host,
                     'time': time})
    return result.rowcount
    inner_q = session.query(models.Network.id).\
                      filter_by(host=host).\
                      subquery()
    result = session.query(models.FixedIp).\
                     filter(models.FixedIp.network_id.in_(inner_q)).\
                     filter(models.FixedIp.updated_at < time).\
                     filter(models.FixedIp.instance_id != None).\
                     filter_by(allocated=0).\
                     update({'instance_id': None,
                             'leased': 0})
    return result


@require_admin_context
@@ -771,14 +771,16 @@ def instance_data_get_for_project(context, project_id):
def instance_destroy(context, instance_id):
    session = get_session()
    with session.begin():
        session.execute('update instances set deleted=1,'
                        'deleted_at=:at where id=:id',
                        {'id': instance_id,
                         'at': datetime.datetime.utcnow()})
        session.execute('update security_group_instance_association '
                        'set deleted=1,deleted_at=:at where instance_id=:id',
                        {'id': instance_id,
                         'at': datetime.datetime.utcnow()})
        session.query(models.Instance).\
                filter_by(id=instance_id).\
                update({'deleted': 1,
                        'deleted_at': datetime.datetime.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(instance_id=instance_id).\
                update({'deleted': 1,
                        'deleted_at': datetime.datetime.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
@@ -1059,9 +1061,11 @@ def key_pair_destroy_all_by_user(context, user_id):
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        # TODO(vish): do we have to use sql here?
        session.execute('update key_pairs set deleted=1 where user_id=:id',
                        {'id': user_id})
        session.query(models.KeyPair).\
                filter_by(user_id=user_id).\
                update({'deleted': 1,
                        'deleted_at': datetime.datetime.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
@@ -1181,7 +1185,9 @@ def network_disassociate(context, network_id):
@require_admin_context
def network_disassociate_all(context):
    session = get_session()
    session.execute('update networks set project_id=NULL')
    session.query(models.Network).\
            update({'project_id': None,
                    'updated_at': literal_column('updated_at')})


@require_context
@@ -1563,15 +1569,17 @@ def volume_data_get_for_project(context, project_id):
def volume_destroy(context, volume_id):
    session = get_session()
    with session.begin():
        # TODO(vish): do we have to use sql here?
        session.execute('update volumes set deleted=1 where id=:id',
                        {'id': volume_id})
        session.execute('update export_devices set volume_id=NULL '
                        'where volume_id=:id',
                        {'id': volume_id})
        session.execute('update iscsi_targets set volume_id=NULL '
                        'where volume_id=:id',
                        {'id': volume_id})
        session.query(models.Volume).\
                filter_by(id=volume_id).\
                update({'deleted': 1,
                        'deleted_at': datetime.datetime.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.ExportDevice).\
                filter_by(volume_id=volume_id).\
                update({'volume_id': None})
        session.query(models.IscsiTarget).\
                filter_by(volume_id=volume_id).\
                update({'volume_id': None})


@require_admin_context
@@ -1803,17 +1811,21 @@ def security_group_create(context, values):
def security_group_destroy(context, security_group_id):
    session = get_session()
    with session.begin():
        # TODO(vish): do we have to use sql here?
        session.execute('update security_groups set deleted=1 where id=:id',
                        {'id': security_group_id})
        session.execute('update security_group_instance_association '
                        'set deleted=1,deleted_at=:at '
                        'where security_group_id=:id',
                        {'id': security_group_id,
                         'at': datetime.datetime.utcnow()})
        session.execute('update security_group_rules set deleted=1 '
                        'where group_id=:id',
                        {'id': security_group_id})
        session.query(models.SecurityGroup).\
                filter_by(id=security_group_id).\
                update({'deleted': 1,
                        'deleted_at': datetime.datetime.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(security_group_id=security_group_id).\
                update({'deleted': 1,
                        'deleted_at': datetime.datetime.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupIngressRule).\
                filter_by(group_id=security_group_id).\
                update({'deleted': 1,
                        'deleted_at': datetime.datetime.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
@@ -1821,9 +1833,14 @@ def security_group_destroy_all(context, session=None):
    if not session:
        session = get_session()
    with session.begin():
        # TODO(vish): do we have to use sql here?
        session.execute('update security_groups set deleted=1')
        session.execute('update security_group_rules set deleted=1')
        session.query(models.SecurityGroup).\
                update({'deleted': 1,
                        'deleted_at': datetime.datetime.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupIngressRule).\
                update({'deleted': 1,
                        'deleted_at': datetime.datetime.utcnow(),
                        'updated_at': literal_column('updated_at')})


###################
@@ -1952,12 +1969,15 @@ def user_create(_context, values):
def user_delete(context, id):
    session = get_session()
    with session.begin():
        session.execute('delete from user_project_association '
                        'where user_id=:id', {'id': id})
        session.execute('delete from user_role_association '
                        'where user_id=:id', {'id': id})
        session.execute('delete from user_project_role_association '
                        'where user_id=:id', {'id': id})
        session.query(models.UserProjectAssociation).\
                filter_by(user_id=id).\
                delete()
        session.query(models.UserRoleAssociation).\
                filter_by(user_id=id).\
                delete()
        session.query(models.UserProjectRoleAssociation).\
                filter_by(user_id=id).\
                delete()
        user_ref = user_get(context, id, session=session)
        session.delete(user_ref)

@@ -2014,8 +2034,11 @@ def project_get_by_user(context, user_id):
    session = get_session()
    user = session.query(models.User).\
                   filter_by(deleted=can_read_deleted(context)).\
                   filter_by(id=user_id).\
                   options(joinedload_all('projects')).\
                   first()
    if not user:
        raise exception.NotFound(_('Invalid user_id %s') % user_id)
    return user.projects


@@ -2048,10 +2071,12 @@ def project_update(context, project_id, values):
def project_delete(context, id):
    session = get_session()
    with session.begin():
        session.execute('delete from user_project_association '
                        'where project_id=:id', {'id': id})
        session.execute('delete from user_project_role_association '
                        'where project_id=:id', {'id': id})
        session.query(models.UserProjectAssociation).\
                filter_by(project_id=id).\
                delete()
        session.query(models.UserProjectRoleAssociation).\
                filter_by(project_id=id).\
                delete()
        project_ref = project_get(context, id, session=session)
        session.delete(project_ref)

@@ -2076,11 +2101,11 @@ def user_get_roles_for_project(context, user_id, project_id):
def user_remove_project_role(context, user_id, project_id, role):
    session = get_session()
    with session.begin():
        session.execute('delete from user_project_role_association where '
                        'user_id=:user_id and project_id=:project_id and '
                        'role=:role', {'user_id': user_id,
                                       'project_id': project_id,
                                       'role': role})
        session.query(models.UserProjectRoleAssociation).\
                filter_by(user_id=user_id).\
                filter_by(project_id=project_id).\
                filter_by(role=role).\
                delete()


def user_remove_role(context, user_id, role):
@@ -2231,8 +2256,9 @@ def console_delete(context, console_id):
    session = get_session()
    with session.begin():
        # consoles are meant to be transient. (mdragon)
        session.execute('delete from consoles '
                        'where id=:id', {'id': console_id})
        session.query(models.Console).\
                filter_by(id=console_id).\
                delete()


def console_get_by_pool_instance(context, pool_id, instance_id):
@@ -2388,8 +2414,9 @@ def zone_update(context, zone_id, values):
def zone_delete(context, zone_id):
    session = get_session()
    with session.begin():
        session.execute('delete from zones '
                        'where id=:id', {'id': zone_id})
        session.query(models.Zone).\
                filter_by(id=zone_id).\
                delete()


@require_admin_context
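The hunks above replace hand-written SQL issued through session.execute() with equivalent SQLAlchemy ORM query expressions. A minimal, self-contained sketch of the same soft-delete pattern follows; it uses a hypothetical FakeRecord model and an in-memory SQLite database rather than Nova's real models and sessions, and synchronize_session=False is added here only to keep the sketch portable across SQLAlchemy versions.

    import datetime

    from sqlalchemy import Column, DateTime, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.sql.expression import literal_column

    Base = declarative_base()


    class FakeRecord(Base):
        # Hypothetical table, standing in for models.Instance and friends.
        __tablename__ = 'fake_records'
        id = Column(Integer, primary_key=True)
        deleted = Column(Integer, default=0)
        deleted_at = Column(DateTime)
        updated_at = Column(DateTime)


    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    session.add(FakeRecord(id=1))
    session.commit()

    # Bulk UPDATE expressed through the ORM instead of a raw SQL string.
    # literal_column('updated_at') reassigns the column to itself so the
    # bulk update does not bump the timestamp, mirroring the diff above.
    session.query(FakeRecord).\
            filter_by(id=1).\
            update({'deleted': 1,
                    'deleted_at': datetime.datetime.utcnow(),
                    'updated_at': literal_column('updated_at')},
                   synchronize_session=False)
    session.commit()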
@@ -0,0 +1,51 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from sqlalchemy import *
from sqlalchemy.sql import text
from migrate import *

from nova import log as logging


meta = MetaData()

instances = Table('instances', meta,
        Column('id', Integer(),  primary_key=True, nullable=False),
        )

instances_os_type = Column('os_type',
                           String(length=255, convert_unicode=False,
                                  assert_unicode=None, unicode_error=None,
                                  _warn_on_bytestring=False),
                           nullable=True)


def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    instances.create_column(instances_os_type)
    migrate_engine.execute(instances.update()\
                           .where(instances.c.os_type == None)\
                           .values(os_type='linux'))


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    instances.drop_column('os_type')
@@ -231,6 +231,8 @@ class Instance(BASE, NovaBase):
    launched_on = Column(Text)
    locked = Column(Boolean)

    os_type = Column(String(255))

    # TODO(vish): see Ewan's email about state improvements, probably
    #             should be in a driver base class or some such
    # vmstate_state = running, halted, suspended, paused
@@ -356,3 +356,7 @@ DEFINE_string('host', socket.gethostname(),

DEFINE_string('node_availability_zone', 'nova',
              'availability zone of this node')

DEFINE_string('zone_name', 'nova', 'name of this zone')
DEFINE_string('zone_capabilities', 'kypervisor:xenserver;os:linux',
              'Key/Value tags which represent capabilities of this zone')
@@ -236,25 +236,32 @@ class S3ImageService(service.BaseImageService):
    @staticmethod
    def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
                       cloud_private_key, decrypted_filename):
        key, err = utils.execute(
                'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
                process_input=encrypted_key,
                check_exit_code=False)
        key, err = utils.execute('openssl',
                                 'rsautl',
                                 '-decrypt',
                                 '-inkey', '%s' % cloud_private_key,
                                 process_input=encrypted_key,
                                 check_exit_code=False)
        if err:
            raise exception.Error(_("Failed to decrypt private key: %s")
                                  % err)
        iv, err = utils.execute(
                'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
                process_input=encrypted_iv,
                check_exit_code=False)
        iv, err = utils.execute('openssl',
                                'rsautl',
                                '-decrypt',
                                '-inkey', '%s' % cloud_private_key,
                                process_input=encrypted_iv,
                                check_exit_code=False)
        if err:
            raise exception.Error(_("Failed to decrypt initialization "
                                    "vector: %s") % err)

        _out, err = utils.execute(
                'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
                 % (encrypted_filename, key, iv, decrypted_filename),
                 check_exit_code=False)
        _out, err = utils.execute('openssl', 'enc',
                                  '-d', '-aes-128-cbc',
                                  '-in', '%s' % (encrypted_filename,),
                                  '-K', '%s' % (key,),
                                  '-iv', '%s' % (iv,),
                                  '-out', '%s' % (decrypted_filename,),
                                  check_exit_code=False)
        if err:
            raise exception.Error(_("Failed to decrypt image file "
                                    "%(image_file)s: %(err)s") %
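The hunk above switches utils.execute() from one interpolated shell string to one argument per parameter, so filenames and key material are never re-parsed by a shell. A rough standalone equivalent of the same idea, using only the standard library's subprocess module rather than Nova's utils.execute (the function name here is just illustrative), is:

    import subprocess


    def openssl_rsautl_decrypt(cloud_private_key, encrypted_key):
        # Argument-vector form: no shell is involved, so paths containing
        # spaces or shell metacharacters cannot change the command.
        proc = subprocess.Popen(
            ['openssl', 'rsautl', '-decrypt', '-inkey', cloud_private_key],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        out, err = proc.communicate(encrypted_key)
        return out, err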
@@ -17,7 +17,11 @@
 | 
			
		||||
Implements vlans, bridges, and iptables rules using linux utilities.
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
import inspect
 | 
			
		||||
import os
 | 
			
		||||
import calendar
 | 
			
		||||
 | 
			
		||||
from eventlet import semaphore
 | 
			
		||||
 | 
			
		||||
from nova import db
 | 
			
		||||
from nova import exception
 | 
			
		||||
@@ -25,7 +29,6 @@ from nova import flags
 | 
			
		||||
from nova import log as logging
 | 
			
		||||
from nova import utils
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
LOG = logging.getLogger("nova.linux_net")
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@@ -52,10 +55,10 @@ flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
 | 
			
		||||
                        'location of nova-dhcpbridge')
 | 
			
		||||
flags.DEFINE_string('routing_source_ip', '$my_ip',
 | 
			
		||||
                    'Public IP of network host')
 | 
			
		||||
flags.DEFINE_bool('use_nova_chains', False,
 | 
			
		||||
                  'use the nova_ routing chains instead of default')
 | 
			
		||||
flags.DEFINE_string('input_chain', 'INPUT',
 | 
			
		||||
                    'chain to add nova_input to')
 | 
			
		||||
flags.DEFINE_integer('dhcp_lease_time', 120,
 | 
			
		||||
                     'Lifetime of a DHCP lease')
 | 
			
		||||
 | 
			
		||||
flags.DEFINE_string('dns_server', None,
 | 
			
		||||
                    'if set, uses specific dns server for dnsmasq')
 | 
			
		||||
@@ -63,79 +66,332 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
 | 
			
		||||
                    'dmz range that should be accepted')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
binary_name = os.path.basename(inspect.stack()[-1][1])
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class IptablesRule(object):
 | 
			
		||||
    """An iptables rule
 | 
			
		||||
 | 
			
		||||
    You shouldn't need to use this class directly, it's only used by
 | 
			
		||||
    IptablesManager
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, chain, rule, wrap=True, top=False):
 | 
			
		||||
        self.chain = chain
 | 
			
		||||
        self.rule = rule
 | 
			
		||||
        self.wrap = wrap
 | 
			
		||||
        self.top = top
 | 
			
		||||
 | 
			
		||||
    def __eq__(self, other):
 | 
			
		||||
        return ((self.chain == other.chain) and
 | 
			
		||||
                (self.rule == other.rule) and
 | 
			
		||||
                (self.top == other.top) and
 | 
			
		||||
                (self.wrap == other.wrap))
 | 
			
		||||
 | 
			
		||||
    def __ne__(self, other):
 | 
			
		||||
        return not self == other
 | 
			
		||||
 | 
			
		||||
    def __str__(self):
 | 
			
		||||
        if self.wrap:
 | 
			
		||||
            chain = '%s-%s' % (binary_name, self.chain)
 | 
			
		||||
        else:
 | 
			
		||||
            chain = self.chain
 | 
			
		||||
        return '-A %s %s' % (chain, self.rule)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class IptablesTable(object):
 | 
			
		||||
    """An iptables table"""
 | 
			
		||||
 | 
			
		||||
    def __init__(self):
 | 
			
		||||
        self.rules = []
 | 
			
		||||
        self.chains = set()
 | 
			
		||||
        self.unwrapped_chains = set()
 | 
			
		||||
 | 
			
		||||
    def add_chain(self, name, wrap=True):
 | 
			
		||||
        """Adds a named chain to the table
 | 
			
		||||
 | 
			
		||||
        The chain name is wrapped to be unique for the component creating
 | 
			
		||||
        it, so different components of Nova can safely create identically
 | 
			
		||||
        named chains without interfering with one another.
 | 
			
		||||
 | 
			
		||||
        At the moment, its wrapped name is <binary name>-<chain name>,
 | 
			
		||||
        so if nova-compute creates a chain named "OUTPUT", it'll actually
 | 
			
		||||
        end up named "nova-compute-OUTPUT".
 | 
			
		||||
        """
 | 
			
		||||
        if wrap:
 | 
			
		||||
            self.chains.add(name)
 | 
			
		||||
        else:
 | 
			
		||||
            self.unwrapped_chains.add(name)
 | 
			
		||||
 | 
			
		||||
    def remove_chain(self, name, wrap=True):
 | 
			
		||||
        """Remove named chain
 | 
			
		||||
 | 
			
		||||
        This removal "cascades". All rule in the chain are removed, as are
 | 
			
		||||
        all rules in other chains that jump to it.
 | 
			
		||||
 | 
			
		||||
        If the chain is not found, this is merely logged.
 | 
			
		||||
        """
 | 
			
		||||
        if wrap:
 | 
			
		||||
            chain_set = self.chains
 | 
			
		||||
        else:
 | 
			
		||||
            chain_set = self.unwrapped_chains
 | 
			
		||||
 | 
			
		||||
        if name not in chain_set:
 | 
			
		||||
            LOG.debug(_("Attempted to remove chain %s which doesn't exist"),
 | 
			
		||||
                      name)
 | 
			
		||||
            return
 | 
			
		||||
 | 
			
		||||
        chain_set.remove(name)
 | 
			
		||||
        self.rules = filter(lambda r: r.chain != name, self.rules)
 | 
			
		||||
 | 
			
		||||
        if wrap:
 | 
			
		||||
            jump_snippet = '-j %s-%s' % (binary_name, name)
 | 
			
		||||
        else:
 | 
			
		||||
            jump_snippet = '-j %s' % (name,)
 | 
			
		||||
 | 
			
		||||
        self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
 | 
			
		||||
 | 
			
		||||
    def add_rule(self, chain, rule, wrap=True, top=False):
 | 
			
		||||
        """Add a rule to the table
 | 
			
		||||
 | 
			
		||||
        This is just like what you'd feed to iptables, just without
 | 
			
		||||
        the "-A <chain name>" bit at the start.
 | 
			
		||||
 | 
			
		||||
        However, if you need to jump to one of your wrapped chains,
 | 
			
		||||
        prepend its name with a '$' which will ensure the wrapping
 | 
			
		||||
        is applied correctly.
 | 
			
		||||
        """
 | 
			
		||||
        if wrap and chain not in self.chains:
 | 
			
		||||
            raise ValueError(_("Unknown chain: %r") % chain)
 | 
			
		||||
 | 
			
		||||
        if '$' in rule:
 | 
			
		||||
            rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
 | 
			
		||||
 | 
			
		||||
        self.rules.append(IptablesRule(chain, rule, wrap, top))
 | 
			
		||||
 | 
			
		||||
    def _wrap_target_chain(self, s):
 | 
			
		||||
        if s.startswith('$'):
 | 
			
		||||
            return '%s-%s' % (binary_name, s[1:])
 | 
			
		||||
        return s
 | 
			
		||||
 | 
			
		||||
    def remove_rule(self, chain, rule, wrap=True, top=False):
 | 
			
		||||
        """Remove a rule from a chain
 | 
			
		||||
 | 
			
		||||
        Note: The rule must be exactly identical to the one that was added.
 | 
			
		||||
        You cannot switch arguments around like you can with the iptables
 | 
			
		||||
        CLI tool.
 | 
			
		||||
        """
 | 
			
		||||
        try:
 | 
			
		||||
            self.rules.remove(IptablesRule(chain, rule, wrap, top))
 | 
			
		||||
        except ValueError:
 | 
			
		||||
            LOG.debug(_("Tried to remove rule that wasn't there:"
 | 
			
		||||
                        " %(chain)r %(rule)r %(wrap)r %(top)r"),
 | 
			
		||||
                      {'chain': chain, 'rule': rule,
 | 
			
		||||
                       'top': top, 'wrap': wrap})
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class IptablesManager(object):
 | 
			
		||||
    """Wrapper for iptables
 | 
			
		||||
 | 
			
		||||
    See IptablesTable for some usage docs
 | 
			
		||||
 | 
			
		||||
    A number of chains are set up to begin with.
 | 
			
		||||
 | 
			
		||||
    First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
 | 
			
		||||
    name is not wrapped, so it's shared between the various nova workers. It's
 | 
			
		||||
    intended for rules that need to live at the top of the FORWARD and OUTPUT
 | 
			
		||||
    chains. It's in both the ipv4 and ipv6 set of tables.
 | 
			
		||||
 | 
			
		||||
    For ipv4 and ipv6, the builtin INPUT, OUTPUT, and FORWARD filter chains are
 | 
			
		||||
    wrapped, meaning that the "real" INPUT chain has a rule that jumps to the
 | 
			
		||||
    wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
 | 
			
		||||
    "local" which is jumped to from nova-filter-top.
 | 
			
		||||
 | 
			
		||||
    For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are
 | 
			
		||||
    wrapped in the same was as the builtin filter chains. Additionally, there's
 | 
			
		||||
    a snat chain that is applied after the POSTROUTING chain.
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, execute=None):
 | 
			
		||||
        if not execute:
 | 
			
		||||
            if FLAGS.fake_network:
 | 
			
		||||
                self.execute = lambda *args, **kwargs: ('', '')
 | 
			
		||||
            else:
 | 
			
		||||
                self.execute = utils.execute
 | 
			
		||||
        else:
 | 
			
		||||
            self.execute = execute
 | 
			
		||||
 | 
			
		||||
        self.ipv4 = {'filter': IptablesTable(),
 | 
			
		||||
                     'nat': IptablesTable()}
 | 
			
		||||
        self.ipv6 = {'filter': IptablesTable()}
 | 
			
		||||
 | 
			
		||||
        # Add a nova-filter-top chain. It's intended to be shared
 | 
			
		||||
        # among the various nova components. It sits at the very top
 | 
			
		||||
        # of FORWARD and OUTPUT.
 | 
			
		||||
        for tables in [self.ipv4, self.ipv6]:
 | 
			
		||||
            tables['filter'].add_chain('nova-filter-top', wrap=False)
 | 
			
		||||
            tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
 | 
			
		||||
                                      wrap=False, top=True)
            tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
                                      wrap=False, top=True)

            tables['filter'].add_chain('local')
            tables['filter'].add_rule('nova-filter-top', '-j $local',
                                      wrap=False)

        # Wrap the builtin chains
        builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
                              'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
                          6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}

        for ip_version in builtin_chains:
            if ip_version == 4:
                tables = self.ipv4
            elif ip_version == 6:
                tables = self.ipv6

            for table, chains in builtin_chains[ip_version].iteritems():
                for chain in chains:
                    tables[table].add_chain(chain)
                    tables[table].add_rule(chain, '-j $%s' % (chain,),
                                           wrap=False)

        # Add a nova-postrouting-bottom chain. It's intended to be shared
        # among the various nova components. We set it as the last chain
        # of POSTROUTING chain.
        self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
        self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
                                  wrap=False)

        # We add a snat chain to the shared nova-postrouting-bottom chain
        # so that it's applied last.
        self.ipv4['nat'].add_chain('snat')
        self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
                                  wrap=False)

        # And then we add a floating-snat chain and jump to first thing in
        # the snat chain.
        self.ipv4['nat'].add_chain('floating-snat')
        self.ipv4['nat'].add_rule('snat', '-j $floating-snat')

        self.semaphore = semaphore.Semaphore()

    @utils.synchronized('iptables')
    def apply(self):
        """Apply the current in-memory set of iptables rules

        This will blow away any rules left over from previous runs of the
        same component of Nova, and replace them with our current set of
        rules. This happens atomically, thanks to iptables-restore.

        We wrap the call in a semaphore lock, so that we don't race with
        ourselves. In the event of a race with another component running
        an iptables-* command at the same time, we retry up to 5 times.
        """
        with self.semaphore:
            s = [('iptables', self.ipv4)]
            if FLAGS.use_ipv6:
                s += [('ip6tables', self.ipv6)]

            for cmd, tables in s:
                for table in tables:
                    current_table, _ = self.execute('sudo',
                                                    '%s-save' % (cmd,),
                                                    '-t', '%s' % (table,),
                                                    attempts=5)
                    current_lines = current_table.split('\n')
                    new_filter = self._modify_rules(current_lines,
                                                    tables[table])
                    self.execute('sudo', '%s-restore' % (cmd,),
                                 process_input='\n'.join(new_filter),
                                 attempts=5)

    def _modify_rules(self, current_lines, table, binary=None):
        unwrapped_chains = table.unwrapped_chains
        chains = table.chains
        rules = table.rules

        # Remove any trace of our rules
        new_filter = filter(lambda line: binary_name not in line,
                            current_lines)

        seen_chains = False
        rules_index = 0
        for rules_index, rule in enumerate(new_filter):
            if not seen_chains:
                if rule.startswith(':'):
                    seen_chains = True
            else:
                if not rule.startswith(':'):
                    break

        our_rules = []
        for rule in rules:
            rule_str = str(rule)
            if rule.top:
                # rule.top == True means we want this rule to be at the top.
                # Further down, we weed out duplicates from the bottom of the
                # list, so here we remove the dupes ahead of time.
                new_filter = filter(lambda s: s.strip() != rule_str.strip(),
                                    new_filter)
            our_rules += [rule_str]

        new_filter[rules_index:rules_index] = our_rules

        new_filter[rules_index:rules_index] = [':%s - [0:0]' % \
                                               (name,) \
                                               for name in unwrapped_chains]
        new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' % \
                                               (binary_name, name,) \
                                               for name in chains]

        seen_lines = set()

        def _weed_out_duplicates(line):
            line = line.strip()
            if line in seen_lines:
                return False
            else:
                seen_lines.add(line)
                return True

        # We filter duplicates, letting the *last* occurrence take
        # precedence.
        new_filter.reverse()
        new_filter = filter(_weed_out_duplicates, new_filter)
        new_filter.reverse()
        return new_filter

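
# Illustrative sketch (not from the patch): _modify_rules() above keeps only
# the *last* occurrence of a duplicated rule line by reversing the list,
# filtering against a "seen" set, and reversing back. The helper name below
# is made up for illustration only.
def _dedup_keep_last_example(lines):
    """Drop duplicate lines, letting the last occurrence win."""
    seen = set()
    kept = []
    for line in reversed(lines):
        if line.strip() not in seen:
            seen.add(line.strip())
            kept.append(line)
    kept.reverse()
    return kept
# _dedup_keep_last_example(['-A a', '-A b', '-A a']) == ['-A b', '-A a']
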
iptables_manager = IptablesManager()
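
# A minimal usage sketch (assumed flow, mirroring metadata_forward() and
# init_host() below): callers add chains and rules to the shared manager in
# memory, then push the whole set atomically with apply(). The chain name
# 'example' is hypothetical.
def _example_usage():
    iptables_manager.ipv4['filter'].add_chain('example')
    iptables_manager.ipv4['filter'].add_rule('example', '-j ACCEPT')
    iptables_manager.apply()
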

def metadata_forward():
    """Create forwarding rule for metadata"""
    _confirm_rule("PREROUTING", '-t', 'nat', '-s', '0.0.0.0/0',
             '-d', '169.254.169.254/32', '-p', 'tcp', '-m', 'tcp',
             '--dport', '80', '-j', 'DNAT',
             '--to-destination',
             '%s:%s' % (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
    iptables_manager.ipv4['nat'].add_rule("PREROUTING",
                                          "-s 0.0.0.0/0 -d 169.254.169.254/32 "
                                          "-p tcp -m tcp --dport 80 -j DNAT "
                                          "--to-destination %s:%s" % \
                                          (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
    iptables_manager.apply()


def init_host():
    """Basic networking setup goes here"""

    if FLAGS.use_nova_chains:
        _execute('sudo', 'iptables', '-N', 'nova_input', check_exit_code=False)
        _execute('sudo', 'iptables', '-D', FLAGS.input_chain,
                 '-j', 'nova_input',
                 check_exit_code=False)
        _execute('sudo', 'iptables', '-A', FLAGS.input_chain,
                 '-j', 'nova_input')
        _execute('sudo', 'iptables', '-N', 'nova_forward',
                 check_exit_code=False)
        _execute('sudo', 'iptables', '-D', 'FORWARD', '-j', 'nova_forward',
                 check_exit_code=False)
        _execute('sudo', 'iptables', '-A', 'FORWARD', '-j', 'nova_forward')
        _execute('sudo', 'iptables', '-N', 'nova_output',
                 check_exit_code=False)
        _execute('sudo', 'iptables', '-D', 'OUTPUT', '-j', 'nova_output',
                 check_exit_code=False)
        _execute('sudo', 'iptables', '-A', 'OUTPUT', '-j', 'nova_output')
        _execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_prerouting',
                 check_exit_code=False)
        _execute('sudo', 'iptables', '-t', 'nat', '-D', 'PREROUTING',
                 '-j', 'nova_prerouting', check_exit_code=False)
        _execute('sudo', 'iptables', '-t', 'nat', '-A', 'PREROUTING',
                 '-j', 'nova_prerouting')
        _execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_postrouting',
                 check_exit_code=False)
        _execute('sudo', 'iptables', '-t', 'nat', '-D', 'POSTROUTING',
                 '-j', 'nova_postrouting', check_exit_code=False)
        _execute('sudo', 'iptables', '-t', 'nat', '-A', 'POSTROUTING',
                 '-j', 'nova_postrouting')
        _execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_snatting',
                 check_exit_code=False)
        _execute('sudo', 'iptables', '-t', 'nat', '-D', 'POSTROUTING',
                 '-j nova_snatting', check_exit_code=False)
        _execute('sudo', 'iptables', '-t', 'nat', '-A', 'POSTROUTING',
                 '-j', 'nova_snatting')
        _execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_output',
                 check_exit_code=False)
        _execute('sudo', 'iptables', '-t', 'nat', '-D', 'OUTPUT',
                 '-j nova_output', check_exit_code=False)
        _execute('sudo', 'iptables', '-t', 'nat', '-A', 'OUTPUT',
                 '-j', 'nova_output')
    else:
        # NOTE(vish): This makes it easy to ensure snatting rules always
        #             come after the accept rules in the postrouting chain
        _execute('sudo', 'iptables', '-t', 'nat', '-N', 'SNATTING',
                 check_exit_code=False)
        _execute('sudo', 'iptables', '-t', 'nat', '-D', 'POSTROUTING',
                 '-j', 'SNATTING', check_exit_code=False)
        _execute('sudo', 'iptables', '-t', 'nat', '-A', 'POSTROUTING',
                 '-j', 'SNATTING')

    # NOTE(devcamcar): Cloud public SNAT entries and the default
    # SNAT rule for outbound traffic.
    _confirm_rule("SNATTING", '-t', 'nat', '-s', FLAGS.fixed_range,
             '-j', 'SNAT', '--to-source', FLAGS.routing_source_ip,
             append=True)
    iptables_manager.ipv4['nat'].add_rule("snat",
                                          "-s %s -j SNAT --to-source %s" % \
                                           (FLAGS.fixed_range,
                                            FLAGS.routing_source_ip))

    _confirm_rule("POSTROUTING", '-t', 'nat', '-s', FLAGS.fixed_range,
                  '-d', FLAGS.dmz_cidr, '-j', 'ACCEPT')
    _confirm_rule("POSTROUTING", '-t', 'nat', '-s', FLAGS.fixed_range,
                  '-d', FLAGS.fixed_range, '-j', 'ACCEPT')
    iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
                                          "-s %s -d %s -j ACCEPT" % \
                                          (FLAGS.fixed_range, FLAGS.dmz_cidr))

    iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
                                          "-s %(range)s -d %(range)s "
                                          "-j ACCEPT" % \
                                          {'range': FLAGS.fixed_range})
    iptables_manager.apply()


def bind_floating_ip(floating_ip, check_exit_code=True):
@@ -153,31 +409,36 @@ def unbind_floating_ip(floating_ip):

def ensure_vlan_forward(public_ip, port, private_ip):
    """Sets up forwarding rules for vlan"""
    _confirm_rule("FORWARD", '-d', private_ip, '-p', 'udp',
                  '--dport', '1194', '-j', 'ACCEPT')
    _confirm_rule("PREROUTING", '-t', 'nat', '-d', public_ip, '-p', 'udp',
                  '--dport', port, '-j', 'DNAT', '--to', '%s:1194'
                  % private_ip)
    iptables_manager.ipv4['filter'].add_rule("FORWARD",
                                             "-d %s -p udp "
                                             "--dport 1194 "
                                             "-j ACCEPT" % private_ip)
    iptables_manager.ipv4['nat'].add_rule("PREROUTING",
                                          "-d %s -p udp "
                                          "--dport %s -j DNAT --to %s:1194" %
                                          (public_ip, port, private_ip))
    iptables_manager.apply()


def ensure_floating_forward(floating_ip, fixed_ip):
    """Ensure floating ip forwarding rule"""
    _confirm_rule("PREROUTING", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT',
                  '--to', fixed_ip)
    _confirm_rule("OUTPUT", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT',
                  '--to', fixed_ip)
    _confirm_rule("SNATTING", '-t', 'nat', '-s', fixed_ip, '-j', 'SNAT',
                  '--to', floating_ip)
    for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
        iptables_manager.ipv4['nat'].add_rule(chain, rule)
    iptables_manager.apply()


def remove_floating_forward(floating_ip, fixed_ip):
    """Remove forwarding for floating ip"""
    _remove_rule("PREROUTING", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT',
                 '--to', fixed_ip)
    _remove_rule("OUTPUT", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT',
                 '--to', fixed_ip)
    _remove_rule("SNATTING", '-t', 'nat', '-s', fixed_ip, '-j', 'SNAT',
                 '--to', floating_ip)
    for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
        iptables_manager.ipv4['nat'].remove_rule(chain, rule)
    iptables_manager.apply()


def floating_forward_rules(floating_ip, fixed_ip):
    return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
            ("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
            ("floating-snat",
             "-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))]

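
# Worked example with illustrative addresses: for floating ip 1.2.3.4 mapped
# to fixed ip 10.0.0.2, floating_forward_rules() yields exactly the entries
# that ensure_floating_forward() adds and remove_floating_forward() removes.
def _floating_forward_rules_example():
    return floating_forward_rules('1.2.3.4', '10.0.0.2') == [
        ("PREROUTING", "-d 1.2.3.4 -j DNAT --to 10.0.0.2"),
        ("OUTPUT", "-d 1.2.3.4 -j DNAT --to 10.0.0.2"),
        ("floating-snat", "-s 10.0.0.2 -j SNAT --to 1.2.3.4")]
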

def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
@@ -216,7 +477,7 @@ def ensure_bridge(bridge, interface, net_attrs=None):
        _execute('sudo', 'brctl', 'setfd', bridge, 0)
        # _execute("sudo brctl setageing %s 10" % bridge)
        _execute('sudo', 'brctl', 'stp', bridge, 'off')
        _execute('sudo', 'ip', 'link', 'set', bridge, up)
        _execute('sudo', 'ip', 'link', 'set', bridge, 'up')
    if net_attrs:
        # NOTE(vish): The ip for dnsmasq has to be the first address on the
        #             bridge for it to respond to requests properly
@@ -255,11 +516,9 @@ def ensure_bridge(bridge, interface, net_attrs=None):
        for line in out.split("\n"):
            fields = line.split()
            if fields and fields[0] == "inet":
                params = ' '.join(fields[1:-1])
                _execute('sudo', 'ip', 'addr',
                         'del', params, 'dev', fields[-1])
                _execute('sudo', 'ip', 'addr',
                         'add', params, 'dev', bridge)
                params = fields[1:-1]
                _execute(*_ip_bridge_cmd('del', params, fields[-1]))
                _execute(*_ip_bridge_cmd('add', params, bridge))
        if gateway:
            _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway)
        out, err = _execute('sudo', 'brctl', 'addif', bridge, interface,
@@ -269,23 +528,25 @@ def ensure_bridge(bridge, interface, net_attrs=None):
                           "enslave it to bridge %s.\n" % (interface, bridge)):
            raise exception.Error("Failed to add interface: %s" % err)

    if FLAGS.use_nova_chains:
        (out, err) = _execute('sudo', 'iptables', '-N', 'nova_forward',
                              check_exit_code=False)
        if err != 'iptables: Chain already exists.\n':
            # NOTE(vish): chain didn't exist link chain
            _execute('sudo', 'iptables', '-D', 'FORWARD', '-j', 'nova_forward',
                     check_exit_code=False)
            _execute('sudo', 'iptables', '-A', 'FORWARD', '-j', 'nova_forward')
    iptables_manager.ipv4['filter'].add_rule("FORWARD",
                                             "--in-interface %s -j ACCEPT" % \
                                             bridge)
    iptables_manager.ipv4['filter'].add_rule("FORWARD",
                                             "--out-interface %s -j ACCEPT" % \
                                             bridge)

    _confirm_rule("FORWARD", '--in-interface', bridge, '-j', 'ACCEPT')
    _confirm_rule("FORWARD", '--out-interface', bridge, '-j', 'ACCEPT')
    _execute('sudo', 'iptables', '-N', 'nova-local', check_exit_code=False)
    _confirm_rule("FORWARD", '-j', 'nova-local')

def get_dhcp_leases(context, network_id):
    """Return a network's hosts config in dnsmasq leasefile format"""
    hosts = []
    for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
                                                            network_id):
        hosts.append(_host_lease(fixed_ip_ref))
    return '\n'.join(hosts)


def get_dhcp_hosts(context, network_id):
    """Get a string containing a network's hosts config in dnsmasq format"""
    """Get a string containing a network's hosts config in dhcp-host format"""
    hosts = []
    for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
                                                            network_id):
@@ -330,7 +591,7 @@ def update_dhcp(context, network_id):
    env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
           'DNSMASQ_INTERFACE': network_ref['bridge']}
    command = _dnsmasq_cmd(network_ref)
    _execute(command, addl_env=env)
    _execute(*command, addl_env=env)


def update_ra(context, network_id):
@@ -370,14 +631,30 @@ interface %s
        else:
            LOG.debug(_("Pid %d is stale, relaunching radvd"), pid)
    command = _ra_cmd(network_ref)
    _execute(command)
    _execute(*command)
    db.network_update(context, network_id,
                      {"ra_server":
                       utils.get_my_linklocal(network_ref['bridge'])})


def _host_lease(fixed_ip_ref):
    """Return a host string for an address in leasefile format"""
    instance_ref = fixed_ip_ref['instance']
    if instance_ref['updated_at']:
        timestamp = instance_ref['updated_at']
    else:
        timestamp = instance_ref['created_at']

    seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())

    return "%d %s %s %s *" % (seconds_since_epoch + FLAGS.dhcp_lease_time,
                              instance_ref['mac_address'],
                              fixed_ip_ref['address'],
                              instance_ref['hostname'] or '*')

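
# Worked example (illustrative values): with FLAGS.dhcp_lease_time of 120,
# an instance last updated at epoch second 1300000000, mac
# 02:16:3e:00:00:01, fixed ip 10.0.0.3 and hostname 'vm1', _host_lease()
# returns:
#     "1300000120 02:16:3e:00:00:01 10.0.0.3 vm1 *"
# i.e. expiry time, mac, address, hostname and client-id ('*') in the order
# dnsmasq expects in its leasefile.
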

def _host_dhcp(fixed_ip_ref):
    """Return a host string for an address"""
    """Return a host string for an address in dhcp-host format"""
    instance_ref = fixed_ip_ref['instance']
    return "%s,%s.%s,%s" % (instance_ref['mac_address'],
                                   instance_ref['hostname'],
@@ -401,53 +678,32 @@ def _device_exists(device):
    return not err


def _confirm_rule(chain, *cmd, **kwargs):
    append = kwargs.get('append', False)
    """Delete and re-add iptables rule"""
    if FLAGS.use_nova_chains:
        chain = "nova_%s" % chain.lower()
    if append:
        loc = "-A"
    else:
        loc = "-I"
    _execute('sudo', 'iptables', '--delete', chain, *cmd,
             check_exit_code=False)
    _execute('sudo', 'iptables', loc, chain, *cmd)


def _remove_rule(chain, *cmd):
    """Remove iptables rule"""
    if FLAGS.use_nova_chains:
        chain = "%s" % chain.lower()
    _execute('sudo', 'iptables', '--delete', chain, *cmd)


def _dnsmasq_cmd(net):
    """Builds dnsmasq command"""
    cmd = ['sudo -E dnsmasq',
           ' --strict-order',
           ' --bind-interfaces',
           ' --conf-file=',
           ' --domain=%s' % FLAGS.dhcp_domain,
           ' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
           ' --listen-address=%s' % net['gateway'],
           ' --except-interface=lo',
           ' --dhcp-range=%s,static,120s' % net['dhcp_start'],
           ' --dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
           ' --dhcp-script=%s' % FLAGS.dhcpbridge,
           ' --leasefile-ro']
    cmd = ['sudo', '-E', 'dnsmasq',
           '--strict-order',
           '--bind-interfaces',
           '--conf-file=',
           '--domain=%s' % FLAGS.dhcp_domain,
           '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
           '--listen-address=%s' % net['gateway'],
           '--except-interface=lo',
           '--dhcp-range=%s,static,120s' % net['dhcp_start'],
           '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
           '--dhcp-script=%s' % FLAGS.dhcpbridge,
           '--leasefile-ro']
    if FLAGS.dns_server:
        cmd.append(' -h -R --server=%s' % FLAGS.dns_server)
    return ''.join(cmd)
        cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server]
    return cmd
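
# With the list form above, update_dhcp() can call
# _execute(*command, addl_env=env) (shown earlier in this diff), so every
# flag reaches the child process as its own argv element rather than one
# shell-joined string. For an assumed net['gateway'] of '10.0.0.1' the list
# begins:
#     ['sudo', '-E', 'dnsmasq', '--strict-order', '--bind-interfaces',
#      '--conf-file=', '--domain=...', '--pid-file=...',
#      '--listen-address=10.0.0.1', ...]
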

def _ra_cmd(net):
    """Builds radvd command"""
    cmd = ['sudo -E radvd',
#           ' -u nobody',
           ' -C %s' % _ra_file(net['bridge'], 'conf'),
           ' -p %s' % _ra_file(net['bridge'], 'pid')]
    return ''.join(cmd)
    cmd = ['sudo', '-E', 'radvd',
#           '-u', 'nobody',
           '-C', '%s' % _ra_file(net['bridge'], 'conf'),
           '-p', '%s' % _ra_file(net['bridge'], 'pid')]
    return cmd


def _stop_dnsmasq(network):
@@ -509,3 +765,12 @@ def _ra_pid_for(bridge):
    if os.path.exists(pid_file):
        with open(pid_file, 'r') as f:
            return int(f.read())


def _ip_bridge_cmd(action, params, device):
    """Build commands to add/del ips to bridges/devices"""

    cmd = ['sudo', 'ip', 'addr', action]
    cmd.extend(params)
    cmd.extend(['dev', device])
    return cmd

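# Worked example (illustrative values) for the _ip_bridge_cmd() helper above:
#     _ip_bridge_cmd('add', ['10.0.0.1/24', 'brd', '10.0.0.255'], 'br100')
# returns:
#     ['sudo', 'ip', 'addr', 'add', '10.0.0.1/24', 'brd', '10.0.0.255',
#      'dev', 'br100']
# which ensure_bridge() expands with _execute(*...) when moving addresses
# from the physical interface onto the bridge.
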
@@ -253,25 +253,34 @@ class Image(object):
    @staticmethod
    def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
                      cloud_private_key, decrypted_filename):
        key, err = utils.execute(
                'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
                process_input=encrypted_key,
                check_exit_code=False)
        key, err = utils.execute('openssl',
                                 'rsautl',
                                 '-decrypt',
                                 '-inkey', '%s' % cloud_private_key,
                                 process_input=encrypted_key,
                                 check_exit_code=False)
        if err:
            raise exception.Error(_("Failed to decrypt private key: %s")
                                  % err)
        iv, err = utils.execute(
                'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
                process_input=encrypted_iv,
                check_exit_code=False)
        iv, err = utils.execute('openssl',
                                'rsautl',
                                '-decrypt',
                                '-inkey', '%s' % cloud_private_key,
                                process_input=encrypted_iv,
                                check_exit_code=False)
        if err:
            raise exception.Error(_("Failed to decrypt initialization "
                                    "vector: %s") % err)

        _out, err = utils.execute(
                'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
                 % (encrypted_filename, key, iv, decrypted_filename),
                 check_exit_code=False)
        _out, err = utils.execute('openssl',
                                  'enc',
                                  '-d',
                                  '-aes-128-cbc',
                                  '-in', '%s' % (encrypted_filename,),
                                  '-K', '%s' % (key,),
                                  '-iv', '%s' % (iv,),
                                  '-out', '%s' % (decrypted_filename,),
                                  check_exit_code=False)
        if err:
            raise exception.Error(_("Failed to decrypt image file "
                                    "%(image_file)s: %(err)s") %

nova/scheduler/api.py (new file, 49 lines)
@@ -0,0 +1,49 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Handles all requests relating to schedulers.
"""

from nova import flags
from nova import log as logging
from nova import rpc

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.scheduler.api')


class API(object):
    """API for interacting with the scheduler."""

    def _call_scheduler(self, method, context, params=None):
        """Generic handler for RPC calls to the scheduler.

        :param params: Optional dictionary of arguments to be passed to the
                       scheduler worker

        :retval: Result returned by scheduler worker
        """
        if not params:
            params = {}
        queue = FLAGS.scheduler_topic
        kwargs = {'method': method, 'args': params}
        return rpc.call(context, queue, kwargs)

    def get_zone_list(self, context):
        items = self._call_scheduler('get_zone_list', context)
        for item in items:
            item['api_url'] = item['api_url'].replace('\\/', '/')
        return items
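
# A minimal usage sketch (assumed caller, not from the patch): zone data is
# fetched over RPC on FLAGS.scheduler_topic through the generic helper.
#
#     zones = API().get_zone_list(ctxt)   # ctxt: an admin request context
#
# Internally this amounts to rpc.call(ctxt, FLAGS.scheduler_topic,
# {'method': 'get_zone_list', 'args': {}}), with 'api_url' un-escaped in
# each returned item.
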
@@ -29,6 +29,7 @@ from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
from nova.scheduler import zone_manager

LOG = logging.getLogger('nova.scheduler.manager')
FLAGS = flags.FLAGS
@@ -43,12 +44,21 @@ class SchedulerManager(manager.Manager):
        if not scheduler_driver:
            scheduler_driver = FLAGS.scheduler_driver
        self.driver = utils.import_object(scheduler_driver)
        self.zone_manager = zone_manager.ZoneManager()
        super(SchedulerManager, self).__init__(*args, **kwargs)

    def __getattr__(self, key):
        """Converts all method calls to use the schedule method"""
        return functools.partial(self._schedule, key)

    def periodic_tasks(self, context=None):
        """Poll child zones periodically to get status."""
        self.zone_manager.ping(context)

    def get_zone_list(self, context=None):
        """Get a list of zones from the ZoneManager."""
        return self.zone_manager.get_zone_list()

    def _schedule(self, method, context, topic, *args, **kwargs):
        """Tries to call schedule_* method on the driver to retrieve host.

nova/scheduler/zone_manager.py (new file, 143 lines)
@@ -0,0 +1,143 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
ZoneManager oversees all communications with child Zones.
"""

import novaclient
import thread
import traceback

from datetime import datetime
from eventlet import greenpool

from nova import db
from nova import flags
from nova import log as logging

FLAGS = flags.FLAGS
flags.DEFINE_integer('zone_db_check_interval', 60,
                    'Seconds between getting fresh zone info from db.')
flags.DEFINE_integer('zone_failures_to_offline', 3,
             'Number of consecutive errors before marking zone offline')


class ZoneState(object):
    """Holds the state of all connected child zones."""
    def __init__(self):
        self.is_active = True
        self.name = None
        self.capabilities = None
        self.attempt = 0
        self.last_seen = datetime.min
        self.last_exception = None
        self.last_exception_time = None

    def update_credentials(self, zone):
        """Update zone credentials from db"""
        self.zone_id = zone.id
        self.api_url = zone.api_url
        self.username = zone.username
        self.password = zone.password

    def update_metadata(self, zone_metadata):
        """Update zone metadata after successful communications with
           child zone."""
        self.last_seen = datetime.now()
        self.attempt = 0
        self.name = zone_metadata["name"]
        self.capabilities = zone_metadata["capabilities"]
        self.is_active = True

    def to_dict(self):
        return dict(name=self.name, capabilities=self.capabilities,
                    is_active=self.is_active, api_url=self.api_url,
                    id=self.zone_id)

    def log_error(self, exception):
        """Something went wrong. Check to see if zone should be
           marked as offline."""
        self.last_exception = exception
        self.last_exception_time = datetime.now()
        api_url = self.api_url
        logging.warning(_("'%(exception)s' error talking to "
                          "zone %(api_url)s") % locals())

        max_errors = FLAGS.zone_failures_to_offline
        self.attempt += 1
        if self.attempt >= max_errors:
            self.is_active = False
            logging.error(_("No answer from zone %(api_url)s "
                            "after %(max_errors)d "
                            "attempts. Marking inactive.") % locals())


def _call_novaclient(zone):
    """Call novaclient. Broken out for testing purposes."""
    client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
    return client.zones.info()._info


def _poll_zone(zone):
    """Eventlet worker to poll a zone."""
    logging.debug(_("Polling zone: %s") % zone.api_url)
    try:
        zone.update_metadata(_call_novaclient(zone))
    except Exception, e:
        zone.log_error(traceback.format_exc())


class ZoneManager(object):
    """Keeps the zone states updated."""
    def __init__(self):
        self.last_zone_db_check = datetime.min
        self.zone_states = {}
        self.green_pool = greenpool.GreenPool()

    def get_zone_list(self):
        """Return the list of zones we know about."""
        return [zone.to_dict() for zone in self.zone_states.values()]

    def _refresh_from_db(self, context):
        """Make our zone state map match the db."""
        # Add/update existing zones ...
        zones = db.zone_get_all(context)
        existing = self.zone_states.keys()
        db_keys = []
        for zone in zones:
            db_keys.append(zone.id)
            if zone.id not in existing:
                self.zone_states[zone.id] = ZoneState()
            self.zone_states[zone.id].update_credentials(zone)

        # Cleanup zones removed from db ...
        keys = self.zone_states.keys()  # since we're deleting
        for zone_id in keys:
            if zone_id not in db_keys:
                del self.zone_states[zone_id]

    def _poll_zones(self, context):
        """Try to connect to each child zone and get update."""
        self.green_pool.imap(_poll_zone, self.zone_states.values())

    def ping(self, context=None):
        """Ping should be called periodically to update zone status."""
        diff = datetime.now() - self.last_zone_db_check
        if diff.seconds >= FLAGS.zone_db_check_interval:
            logging.debug(_("Updating zone cache from db."))
            self.last_zone_db_check = datetime.now()
            self._refresh_from_db(context)
        self._poll_zones(context)
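
# Assumed periodic flow (sketch tying this file to the SchedulerManager
# change above): periodic_tasks() calls ping(), which refreshes credentials
# from the db at most once per FLAGS.zone_db_check_interval seconds and then
# polls every child zone on the green pool.
#
#     zm = ZoneManager()
#     zm.ping(ctxt)           # ctxt supplied by the running service
#     zm.get_zone_list()      # -> [{'name': ..., 'api_url': ..., ...}, ...]
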
@@ -27,7 +27,6 @@ from paste import urlmap
from glance import client as glance_client
from glance.common import exception as glance_exc

from nova import auth
from nova import context
from nova import exception as exc
from nova import flags
@@ -36,6 +35,7 @@ import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
from nova.api.openstack import ratelimiting
from nova.auth.manager import User, Project
from nova.image import glance
from nova.image import local
from nova.image import service
@@ -229,19 +229,97 @@ class FakeAuthDatabase(object):

class FakeAuthManager(object):
    auth_data = {}
    projects = {}

    @classmethod
    def clear_fakes(cls):
        cls.auth_data = {}
        cls.projects = {}

    @classmethod
    def reset_fake_data(cls):
        cls.auth_data = dict(acc1=User('guy1', 'guy1', 'acc1',
                                       'fortytwo!', False))
        cls.projects = dict(testacct=Project('testacct',
                                             'testacct',
                                             'guy1',
                                             'test',
                                              []))

    def add_user(self, key, user):
        FakeAuthManager.auth_data[key] = user

    def get_users(self):
        return FakeAuthManager.auth_data.values()

    def get_user(self, uid):
        for k, v in FakeAuthManager.auth_data.iteritems():
            if v.id == uid:
                return v
        return None

    def get_project(self, pid):
    def delete_user(self, uid):
        for k, v in FakeAuthManager.auth_data.items():
            if v.id == uid:
                del FakeAuthManager.auth_data[k]
        return None

    def create_user(self, name, access=None, secret=None, admin=False):
        u = User(name, name, access, secret, admin)
        FakeAuthManager.auth_data[access] = u
        return u

    def modify_user(self, user_id, access=None, secret=None, admin=None):
        user = None
        for k, v in FakeAuthManager.auth_data.iteritems():
            if v.id == user_id:
                user = v
        if user:
            user.access = access
            user.secret = secret
            if admin is not None:
                user.admin = admin

    def is_admin(self, user):
        return user.admin

    def is_project_member(self, user, project):
        return ((user.id in project.member_ids) or
                (user.id == project.project_manager_id))

    def create_project(self, name, manager_user, description=None,
                       member_users=None):
        member_ids = [User.safe_id(m) for m in member_users] \
                     if member_users else []
        p = Project(name, name, User.safe_id(manager_user),
                                 description, member_ids)
        FakeAuthManager.projects[name] = p
        return p

    def delete_project(self, pid):
        if pid in FakeAuthManager.projects:
            del FakeAuthManager.projects[pid]

    def modify_project(self, project, manager_user=None, description=None):
        p = FakeAuthManager.projects.get(project)
        p.project_manager_id = User.safe_id(manager_user)
        p.description = description

    def get_project(self, pid):
        p = FakeAuthManager.projects.get(pid)
        if p:
            return p
        else:
            raise exc.NotFound

    def get_projects(self, user=None):
        if not user:
            return FakeAuthManager.projects.values()
        else:
            return [p for p in FakeAuthManager.projects.values()
                    if (user.id in p.member_ids) or
                       (user.id == p.project_manager_id)]

    def get_user_from_access_key(self, key):
        return FakeAuthManager.auth_data.get(key, None)

nova/tests/api/openstack/test_accounts.py (new file, 125 lines)
@@ -0,0 +1,125 @@
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.


import json

import stubout
import webob

import nova.api
import nova.api.openstack.auth
from nova import context
from nova import flags
from nova import test
from nova.auth.manager import User
from nova.tests.api.openstack import fakes


FLAGS = flags.FLAGS
FLAGS.verbose = True


def fake_init(self):
    self.manager = fakes.FakeAuthManager()


def fake_admin_check(self, req):
    return True


class AccountsTest(test.TestCase):
    def setUp(self):
        super(AccountsTest, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        self.stubs.Set(nova.api.openstack.accounts.Controller, '__init__',
                       fake_init)
        self.stubs.Set(nova.api.openstack.accounts.Controller, '_check_admin',
                       fake_admin_check)
        fakes.FakeAuthManager.clear_fakes()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_auth(self.stubs)

        self.allow_admin = FLAGS.allow_admin_api
        FLAGS.allow_admin_api = True
        fakemgr = fakes.FakeAuthManager()
        joeuser = User('guy1', 'guy1', 'acc1', 'fortytwo!', False)
        superuser = User('guy2', 'guy2', 'acc2', 'swordfish', True)
        fakemgr.add_user(joeuser.access, joeuser)
        fakemgr.add_user(superuser.access, superuser)
        fakemgr.create_project('test1', joeuser)
        fakemgr.create_project('test2', superuser)

    def tearDown(self):
        self.stubs.UnsetAll()
        FLAGS.allow_admin_api = self.allow_admin
        super(AccountsTest, self).tearDown()

    def test_get_account(self):
        req = webob.Request.blank('/v1.0/accounts/test1')
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)

        self.assertEqual(res_dict['account']['id'], 'test1')
        self.assertEqual(res_dict['account']['name'], 'test1')
        self.assertEqual(res_dict['account']['manager'], 'guy1')
        self.assertEqual(res.status_int, 200)

    def test_account_delete(self):
        req = webob.Request.blank('/v1.0/accounts/test1')
        req.method = 'DELETE'
        res = req.get_response(fakes.wsgi_app())
        self.assertTrue('test1' not in fakes.FakeAuthManager.projects)
        self.assertEqual(res.status_int, 200)

    def test_account_create(self):
        body = dict(account=dict(description='test account',
                              manager='guy1'))
        req = webob.Request.blank('/v1.0/accounts/newacct')
        req.headers["Content-Type"] = "application/json"
        req.method = 'PUT'
        req.body = json.dumps(body)

        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)

        self.assertEqual(res.status_int, 200)
        self.assertEqual(res_dict['account']['id'], 'newacct')
        self.assertEqual(res_dict['account']['name'], 'newacct')
        self.assertEqual(res_dict['account']['description'], 'test account')
        self.assertEqual(res_dict['account']['manager'], 'guy1')
        self.assertTrue('newacct' in
                        fakes.FakeAuthManager.projects)
        self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 3)

    def test_account_update(self):
        body = dict(account=dict(description='test account',
                              manager='guy2'))
        req = webob.Request.blank('/v1.0/accounts/test1')
        req.headers["Content-Type"] = "application/json"
        req.method = 'PUT'
        req.body = json.dumps(body)

        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)

        self.assertEqual(res.status_int, 200)
        self.assertEqual(res_dict['account']['id'], 'test1')
        self.assertEqual(res_dict['account']['name'], 'test1')
        self.assertEqual(res_dict['account']['description'], 'test account')
        self.assertEqual(res_dict['account']['manager'], 'guy2')
        self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 2)
@@ -35,7 +35,7 @@ class AdminAPITest(test.TestCase):
    def setUp(self):
        super(AdminAPITest, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        fakes.FakeAuthManager.auth_data = {}
        fakes.FakeAuthManager.reset_fake_data()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)

@@ -65,7 +65,9 @@ class Test(test.TestCase):

    def test_authorize_token(self):
        f = fakes.FakeAuthManager()
        f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
        u = nova.auth.manager.User(1, 'herp', None, None, None)
        f.add_user('derp', u)
        f.create_project('test', u)

        req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
        req.headers['X-Auth-User'] = 'herp'
@@ -176,7 +178,9 @@ class TestLimiter(test.TestCase):

    def test_authorize_token(self):
        f = fakes.FakeAuthManager()
        f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
        u = nova.auth.manager.User(1, 'herp', None, None, None)
        f.add_user('derp', u)
        f.create_project('test', u)

        req = webob.Request.blank('/v1.0/')
        req.headers['X-Auth-User'] = 'herp'

@@ -30,7 +30,7 @@ class FlavorsTest(test.TestCase):
    def setUp(self):
        super(FlavorsTest, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        fakes.FakeAuthManager.auth_data = {}
        fakes.FakeAuthManager.reset_fake_data()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)

@@ -205,7 +205,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
        self.orig_image_service = FLAGS.image_service
        FLAGS.image_service = 'nova.image.glance.GlanceImageService'
        self.stubs = stubout.StubOutForTesting()
        fakes.FakeAuthManager.auth_data = {}
        fakes.FakeAuthManager.reset_fake_data()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)

@@ -120,7 +120,7 @@ class ServersTest(test.TestCase):
    def setUp(self):
        super(ServersTest, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        fakes.FakeAuthManager.auth_data = {}
        fakes.FakeAuthManager.reset_fake_data()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
@@ -440,7 +440,8 @@ class ServersTest(test.TestCase):
        body = dict(server=dict(
            name='server_test', imageId=2, flavorId=2, metadata={},
            personality={}))
        req = webob.Request.blank('/v1.0/servers/1/inject_network_info')
        req = webob.Request.blank(
              '/v1.0/servers/1/inject_network_info')
        req.method = 'POST'
        req.content_type = 'application/json'
        req.body = json.dumps(body)

nova/tests/api/openstack/test_users.py (new file, 141 lines)
@@ -0,0 +1,141 @@
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import json

import stubout
import webob

import nova.api
import nova.api.openstack.auth
from nova import context
from nova import flags
from nova import test
from nova.auth.manager import User, Project
from nova.tests.api.openstack import fakes


FLAGS = flags.FLAGS
FLAGS.verbose = True


def fake_init(self):
    self.manager = fakes.FakeAuthManager()


def fake_admin_check(self, req):
    return True


class UsersTest(test.TestCase):
    def setUp(self):
        super(UsersTest, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        self.stubs.Set(nova.api.openstack.users.Controller, '__init__',
                       fake_init)
        self.stubs.Set(nova.api.openstack.users.Controller, '_check_admin',
                       fake_admin_check)
        fakes.FakeAuthManager.auth_data = {}
        fakes.FakeAuthManager.projects = dict(testacct=Project('testacct',
                                                               'testacct',
                                                               'guy1',
                                                               'test',
                                                               []))
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_auth(self.stubs)

        self.allow_admin = FLAGS.allow_admin_api
        FLAGS.allow_admin_api = True
        fakemgr = fakes.FakeAuthManager()
        fakemgr.add_user('acc1', User('guy1', 'guy1', 'acc1',
                                      'fortytwo!', False))
        fakemgr.add_user('acc2', User('guy2', 'guy2', 'acc2',
                                      'swordfish', True))

    def tearDown(self):
        self.stubs.UnsetAll()
        FLAGS.allow_admin_api = self.allow_admin
        super(UsersTest, self).tearDown()

    def test_get_user_list(self):
        req = webob.Request.blank('/v1.0/users')
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)

        self.assertEqual(res.status_int, 200)
        self.assertEqual(len(res_dict['users']), 2)

    def test_get_user_by_id(self):
        req = webob.Request.blank('/v1.0/users/guy2')
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
 | 
			
		||||
 | 
			
		||||
        self.assertEqual(res_dict['user']['id'], 'guy2')
 | 
			
		||||
        self.assertEqual(res_dict['user']['name'], 'guy2')
 | 
			
		||||
        self.assertEqual(res_dict['user']['secret'], 'swordfish')
 | 
			
		||||
        self.assertEqual(res_dict['user']['admin'], True)
 | 
			
		||||
        self.assertEqual(res.status_int, 200)
 | 
			
		||||
 | 
			
		||||
    def test_user_delete(self):
 | 
			
		||||
        req = webob.Request.blank('/v1.0/users/guy1')
 | 
			
		||||
        req.method = 'DELETE'
 | 
			
		||||
        res = req.get_response(fakes.wsgi_app())
 | 
			
		||||
        self.assertTrue('guy1' not in [u.id for u in
 | 
			
		||||
                        fakes.FakeAuthManager.auth_data.values()])
 | 
			
		||||
        self.assertEqual(res.status_int, 200)
 | 
			
		||||
 | 
			
		||||
    def test_user_create(self):
 | 
			
		||||
        body = dict(user=dict(name='test_guy',
 | 
			
		||||
                              access='acc3',
 | 
			
		||||
                              secret='invasionIsInNormandy',
 | 
			
		||||
                              admin=True))
 | 
			
		||||
        req = webob.Request.blank('/v1.0/users')
 | 
			
		||||
        req.headers["Content-Type"] = "application/json"
 | 
			
		||||
        req.method = 'POST'
 | 
			
		||||
        req.body = json.dumps(body)
 | 
			
		||||
 | 
			
		||||
        res = req.get_response(fakes.wsgi_app())
 | 
			
		||||
        res_dict = json.loads(res.body)
 | 
			
		||||
 | 
			
		||||
        self.assertEqual(res.status_int, 200)
 | 
			
		||||
        self.assertEqual(res_dict['user']['id'], 'test_guy')
 | 
			
		||||
        self.assertEqual(res_dict['user']['name'], 'test_guy')
 | 
			
		||||
        self.assertEqual(res_dict['user']['access'], 'acc3')
 | 
			
		||||
        self.assertEqual(res_dict['user']['secret'], 'invasionIsInNormandy')
 | 
			
		||||
        self.assertEqual(res_dict['user']['admin'], True)
 | 
			
		||||
        self.assertTrue('test_guy' in [u.id for u in
 | 
			
		||||
                        fakes.FakeAuthManager.auth_data.values()])
 | 
			
		||||
        self.assertEqual(len(fakes.FakeAuthManager.auth_data.values()), 3)
 | 
			
		||||
 | 
			
		||||
    def test_user_update(self):
 | 
			
		||||
        body = dict(user=dict(name='guy2',
 | 
			
		||||
                              access='acc2',
 | 
			
		||||
                              secret='invasionIsInNormandy'))
 | 
			
		||||
        req = webob.Request.blank('/v1.0/users/guy2')
 | 
			
		||||
        req.headers["Content-Type"] = "application/json"
 | 
			
		||||
        req.method = 'PUT'
 | 
			
		||||
        req.body = json.dumps(body)
 | 
			
		||||
 | 
			
		||||
        res = req.get_response(fakes.wsgi_app())
 | 
			
		||||
        res_dict = json.loads(res.body)
 | 
			
		||||
 | 
			
		||||
        self.assertEqual(res.status_int, 200)
 | 
			
		||||
        self.assertEqual(res_dict['user']['id'], 'guy2')
 | 
			
		||||
        self.assertEqual(res_dict['user']['name'], 'guy2')
 | 
			
		||||
        self.assertEqual(res_dict['user']['access'], 'acc2')
 | 
			
		||||
        self.assertEqual(res_dict['user']['secret'], 'invasionIsInNormandy')
 | 
			
		||||
        self.assertEqual(res_dict['user']['admin'], True)
 | 
			
		||||
@@ -1,4 +1,4 @@
 | 
			
		||||
# Copyright 2010 OpenStack LLC.
 | 
			
		||||
# Copyright 2011 OpenStack LLC.
 | 
			
		||||
# All Rights Reserved.
 | 
			
		||||
#
 | 
			
		||||
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
 | 
			
		||||
@@ -24,6 +24,7 @@ from nova import flags
 | 
			
		||||
from nova import test
 | 
			
		||||
from nova.api.openstack import zones
 | 
			
		||||
from nova.tests.api.openstack import fakes
 | 
			
		||||
from nova.scheduler import api
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
FLAGS = flags.FLAGS
 | 
			
		||||
@@ -31,7 +32,7 @@ FLAGS.verbose = True
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def zone_get(context, zone_id):
 | 
			
		||||
    return dict(id=1, api_url='http://foo.com', username='bob',
 | 
			
		||||
    return dict(id=1, api_url='http://example.com', username='bob',
 | 
			
		||||
                password='xxx')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@@ -42,7 +43,7 @@ def zone_create(context, values):
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def zone_update(context, zone_id, values):
 | 
			
		||||
    zone = dict(id=zone_id, api_url='http://foo.com', username='bob',
 | 
			
		||||
    zone = dict(id=zone_id, api_url='http://example.com', username='bob',
 | 
			
		||||
                password='xxx')
 | 
			
		||||
    zone.update(values)
 | 
			
		||||
    return zone
 | 
			
		||||
@@ -52,19 +53,33 @@ def zone_delete(context, zone_id):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def zone_get_all(context):
 | 
			
		||||
def zone_get_all_scheduler(*args):
 | 
			
		||||
    return [
 | 
			
		||||
        dict(id=1, api_url='http://foo.com', username='bob',
 | 
			
		||||
        dict(id=1, api_url='http://example.com', username='bob',
 | 
			
		||||
                 password='xxx'),
 | 
			
		||||
        dict(id=2, api_url='http://blah.com', username='alice',
 | 
			
		||||
                 password='qwerty')]
 | 
			
		||||
        dict(id=2, api_url='http://example.org', username='alice',
 | 
			
		||||
                 password='qwerty'),
 | 
			
		||||
    ]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def zone_get_all_scheduler_empty(*args):
 | 
			
		||||
    return []
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def zone_get_all_db(context):
 | 
			
		||||
    return [
 | 
			
		||||
        dict(id=1, api_url='http://example.com', username='bob',
 | 
			
		||||
                 password='xxx'),
 | 
			
		||||
        dict(id=2, api_url='http://example.org', username='alice',
 | 
			
		||||
                 password='qwerty'),
 | 
			
		||||
    ]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ZonesTest(test.TestCase):
 | 
			
		||||
    def setUp(self):
 | 
			
		||||
        super(ZonesTest, self).setUp()
 | 
			
		||||
        self.stubs = stubout.StubOutForTesting()
 | 
			
		||||
        fakes.FakeAuthManager.auth_data = {}
 | 
			
		||||
        fakes.FakeAuthManager.reset_fake_data()
 | 
			
		||||
        fakes.FakeAuthDatabase.data = {}
 | 
			
		||||
        fakes.stub_out_networking(self.stubs)
 | 
			
		||||
        fakes.stub_out_rate_limiting(self.stubs)
 | 
			
		||||
@@ -74,7 +89,6 @@ class ZonesTest(test.TestCase):
 | 
			
		||||
        FLAGS.allow_admin_api = True
 | 
			
		||||
 | 
			
		||||
        self.stubs.Set(nova.db, 'zone_get', zone_get)
 | 
			
		||||
        self.stubs.Set(nova.db, 'zone_get_all', zone_get_all)
 | 
			
		||||
        self.stubs.Set(nova.db, 'zone_update', zone_update)
 | 
			
		||||
        self.stubs.Set(nova.db, 'zone_create', zone_create)
 | 
			
		||||
        self.stubs.Set(nova.db, 'zone_delete', zone_delete)
 | 
			
		||||
@@ -84,7 +98,19 @@ class ZonesTest(test.TestCase):
 | 
			
		||||
        FLAGS.allow_admin_api = self.allow_admin
 | 
			
		||||
        super(ZonesTest, self).tearDown()
 | 
			
		||||
 | 
			
		||||
    def test_get_zone_list(self):
 | 
			
		||||
    def test_get_zone_list_scheduler(self):
 | 
			
		||||
        self.stubs.Set(api.API, '_call_scheduler', zone_get_all_scheduler)
 | 
			
		||||
        req = webob.Request.blank('/v1.0/zones')
 | 
			
		||||
        res = req.get_response(fakes.wsgi_app())
 | 
			
		||||
        res_dict = json.loads(res.body)
 | 
			
		||||
 | 
			
		||||
        self.assertEqual(res.status_int, 200)
 | 
			
		||||
        self.assertEqual(len(res_dict['zones']), 2)
 | 
			
		||||
 | 
			
		||||
    def test_get_zone_list_db(self):
 | 
			
		||||
        self.stubs.Set(api.API, '_call_scheduler',
 | 
			
		||||
                                zone_get_all_scheduler_empty)
 | 
			
		||||
        self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db)
 | 
			
		||||
        req = webob.Request.blank('/v1.0/zones')
 | 
			
		||||
        req.headers["Content-Type"] = "application/json"
 | 
			
		||||
        res = req.get_response(fakes.wsgi_app())
 | 
			
		||||
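The two zone-index tests above stub api.API._call_scheduler and nova.db.zone_get_all independently, which implies the listing prefers live scheduler data and only falls back to the zones table when the scheduler reports nothing. A minimal sketch of that fallback with stand-in callables; the helper name and wiring are illustrative, not the actual controller code:

def list_zones(context, call_scheduler, zone_get_all):
    # call_scheduler / zone_get_all stand in for the real dependencies
    # (the zone_get_all_scheduler* and zone_get_all_db stubs above play
    # these roles in the tests).
    zones = call_scheduler(context)
    if not zones:
        # Scheduler returned nothing; read the static zone records instead.
        zones = zone_get_all(context)
    return zones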
@@ -101,7 +127,7 @@ class ZonesTest(test.TestCase):
 | 
			
		||||
        self.assertEqual(res.status_int, 200)
 | 
			
		||||
        res_dict = json.loads(res.body)
 | 
			
		||||
        self.assertEqual(res_dict['zone']['id'], 1)
 | 
			
		||||
        self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
 | 
			
		||||
        self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
 | 
			
		||||
        self.assertFalse('password' in res_dict['zone'])
 | 
			
		||||
 | 
			
		||||
    def test_zone_delete(self):
 | 
			
		||||
@@ -112,7 +138,7 @@ class ZonesTest(test.TestCase):
 | 
			
		||||
        self.assertEqual(res.status_int, 200)
 | 
			
		||||
 | 
			
		||||
    def test_zone_create(self):
 | 
			
		||||
        body = dict(zone=dict(api_url='http://blah.zoo', username='fred',
 | 
			
		||||
        body = dict(zone=dict(api_url='http://example.com', username='fred',
 | 
			
		||||
                        password='fubar'))
 | 
			
		||||
        req = webob.Request.blank('/v1.0/zones')
 | 
			
		||||
        req.headers["Content-Type"] = "application/json"
 | 
			
		||||
@@ -124,7 +150,7 @@ class ZonesTest(test.TestCase):
 | 
			
		||||
        self.assertEqual(res.status_int, 200)
 | 
			
		||||
        res_dict = json.loads(res.body)
 | 
			
		||||
        self.assertEqual(res_dict['zone']['id'], 1)
 | 
			
		||||
        self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo')
 | 
			
		||||
        self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
 | 
			
		||||
        self.assertFalse('username' in res_dict['zone'])
 | 
			
		||||
 | 
			
		||||
    def test_zone_update(self):
 | 
			
		||||
@@ -139,5 +165,5 @@ class ZonesTest(test.TestCase):
 | 
			
		||||
        self.assertEqual(res.status_int, 200)
 | 
			
		||||
        res_dict = json.loads(res.body)
 | 
			
		||||
        self.assertEqual(res_dict['zone']['id'], 1)
 | 
			
		||||
        self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
 | 
			
		||||
        self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
 | 
			
		||||
        self.assertFalse('username' in res_dict['zone'])
 | 
			
		||||
 
 | 
			
		||||
@@ -77,6 +77,7 @@ def stub_out_db_instance_api(stubs):
 | 
			
		||||
            'mac_address': values['mac_address'],
 | 
			
		||||
            'vcpus': type_data['vcpus'],
 | 
			
		||||
            'local_gb': type_data['local_gb'],
 | 
			
		||||
            'os_type': values['os_type']
 | 
			
		||||
            }
 | 
			
		||||
        return FakeModel(base_options)
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -20,6 +20,7 @@ Unit Tests for network code
 | 
			
		||||
"""
 | 
			
		||||
import IPy
 | 
			
		||||
import os
 | 
			
		||||
import time
 | 
			
		||||
 | 
			
		||||
from nova import context
 | 
			
		||||
from nova import db
 | 
			
		||||
@@ -29,11 +30,153 @@ from nova import log as logging
 | 
			
		||||
from nova import test
 | 
			
		||||
from nova import utils
 | 
			
		||||
from nova.auth import manager
 | 
			
		||||
from nova.network import linux_net
 | 
			
		||||
 | 
			
		||||
FLAGS = flags.FLAGS
 | 
			
		||||
LOG = logging.getLogger('nova.tests.network')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class IptablesManagerTestCase(test.TestCase):
 | 
			
		||||
    sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
 | 
			
		||||
                     '*filter',
 | 
			
		||||
                     ':INPUT ACCEPT [2223527:305688874]',
 | 
			
		||||
                     ':FORWARD ACCEPT [0:0]',
 | 
			
		||||
                     ':OUTPUT ACCEPT [2172501:140856656]',
 | 
			
		||||
                     ':nova-compute-FORWARD - [0:0]',
 | 
			
		||||
                     ':nova-compute-INPUT - [0:0]',
 | 
			
		||||
                     ':nova-compute-local - [0:0]',
 | 
			
		||||
                     ':nova-compute-OUTPUT - [0:0]',
 | 
			
		||||
                     ':nova-filter-top - [0:0]',
 | 
			
		||||
                     '-A FORWARD -j nova-filter-top ',
 | 
			
		||||
                     '-A OUTPUT -j nova-filter-top ',
 | 
			
		||||
                     '-A nova-filter-top -j nova-compute-local ',
 | 
			
		||||
                     '-A INPUT -j nova-compute-INPUT ',
 | 
			
		||||
                     '-A OUTPUT -j nova-compute-OUTPUT ',
 | 
			
		||||
                     '-A FORWARD -j nova-compute-FORWARD ',
 | 
			
		||||
                     '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
 | 
			
		||||
                     '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
 | 
			
		||||
                     '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
 | 
			
		||||
                     '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
 | 
			
		||||
                     '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
 | 
			
		||||
                     '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
 | 
			
		||||
                     '-A FORWARD -o virbr0 -j REJECT --reject-with '
 | 
			
		||||
                     'icmp-port-unreachable ',
 | 
			
		||||
                     '-A FORWARD -i virbr0 -j REJECT --reject-with '
 | 
			
		||||
                     'icmp-port-unreachable ',
 | 
			
		||||
                     'COMMIT',
 | 
			
		||||
                     '# Completed on Fri Feb 18 15:17:05 2011']
 | 
			
		||||
 | 
			
		||||
    sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
 | 
			
		||||
                  '*nat',
 | 
			
		||||
                  ':PREROUTING ACCEPT [3936:762355]',
 | 
			
		||||
                  ':INPUT ACCEPT [2447:225266]',
 | 
			
		||||
                  ':OUTPUT ACCEPT [63491:4191863]',
 | 
			
		||||
                  ':POSTROUTING ACCEPT [63112:4108641]',
 | 
			
		||||
                  ':nova-compute-OUTPUT - [0:0]',
 | 
			
		||||
                  ':nova-compute-floating-ip-snat - [0:0]',
 | 
			
		||||
                  ':nova-compute-SNATTING - [0:0]',
 | 
			
		||||
                  ':nova-compute-PREROUTING - [0:0]',
 | 
			
		||||
                  ':nova-compute-POSTROUTING - [0:0]',
 | 
			
		||||
                  ':nova-postrouting-bottom - [0:0]',
 | 
			
		||||
                  '-A PREROUTING -j nova-compute-PREROUTING ',
 | 
			
		||||
                  '-A OUTPUT -j nova-compute-OUTPUT ',
 | 
			
		||||
                  '-A POSTROUTING -j nova-compute-POSTROUTING ',
 | 
			
		||||
                  '-A POSTROUTING -j nova-postrouting-bottom ',
 | 
			
		||||
                  '-A nova-postrouting-bottom -j nova-compute-SNATTING ',
 | 
			
		||||
                  '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ',
 | 
			
		||||
                  'COMMIT',
 | 
			
		||||
                  '# Completed on Fri Feb 18 15:17:05 2011']
 | 
			
		||||
 | 
			
		||||
    def setUp(self):
 | 
			
		||||
        super(IptablesManagerTestCase, self).setUp()
 | 
			
		||||
        self.manager = linux_net.IptablesManager()
 | 
			
		||||
 | 
			
		||||
    def test_filter_rules_are_wrapped(self):
 | 
			
		||||
        current_lines = self.sample_filter
 | 
			
		||||
 | 
			
		||||
        table = self.manager.ipv4['filter']
 | 
			
		||||
        table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
 | 
			
		||||
        new_lines = self.manager._modify_rules(current_lines, table)
 | 
			
		||||
        self.assertTrue('-A run_tests.py-FORWARD '
 | 
			
		||||
                        '-s 1.2.3.4/5 -j DROP' in new_lines)
 | 
			
		||||
 | 
			
		||||
        table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
 | 
			
		||||
        new_lines = self.manager._modify_rules(current_lines, table)
 | 
			
		||||
        self.assertTrue('-A run_tests.py-FORWARD '
 | 
			
		||||
                        '-s 1.2.3.4/5 -j DROP' not in new_lines)
 | 
			
		||||
 | 
			
		||||
    def test_nat_rules(self):
 | 
			
		||||
        current_lines = self.sample_nat
 | 
			
		||||
        new_lines = self.manager._modify_rules(current_lines,
 | 
			
		||||
                                               self.manager.ipv4['nat'])
 | 
			
		||||
 | 
			
		||||
        for line in [':nova-compute-OUTPUT - [0:0]',
 | 
			
		||||
                     ':nova-compute-floating-ip-snat - [0:0]',
 | 
			
		||||
                     ':nova-compute-SNATTING - [0:0]',
 | 
			
		||||
                     ':nova-compute-PREROUTING - [0:0]',
 | 
			
		||||
                     ':nova-compute-POSTROUTING - [0:0]']:
 | 
			
		||||
            self.assertTrue(line in new_lines, "One of nova-compute's chains "
 | 
			
		||||
                                               "went missing.")
 | 
			
		||||
 | 
			
		||||
        seen_lines = set()
 | 
			
		||||
        for line in new_lines:
 | 
			
		||||
            line = line.strip()
 | 
			
		||||
            self.assertTrue(line not in seen_lines,
 | 
			
		||||
                            "Duplicate line: %s" % line)
 | 
			
		||||
            seen_lines.add(line)
 | 
			
		||||
 | 
			
		||||
        last_postrouting_line = ''
 | 
			
		||||
 | 
			
		||||
        for line in new_lines:
 | 
			
		||||
            if line.startswith('-A POSTROUTING'):
 | 
			
		||||
                last_postrouting_line = line
 | 
			
		||||
 | 
			
		||||
        self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
 | 
			
		||||
                        "Last POSTROUTING rule does not jump to "
 | 
			
		||||
                        "nova-postrouting-bottom: %s" % last_postrouting_line)
 | 
			
		||||
 | 
			
		||||
        for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
 | 
			
		||||
            self.assertTrue('-A %s -j run_tests.py-%s' \
 | 
			
		||||
                            % (chain, chain) in new_lines,
 | 
			
		||||
                            "Built-in chain %s not wrapped" % (chain,))
 | 
			
		||||
 | 
			
		||||
    def test_filter_rules(self):
 | 
			
		||||
        current_lines = self.sample_filter
 | 
			
		||||
        new_lines = self.manager._modify_rules(current_lines,
 | 
			
		||||
                                               self.manager.ipv4['filter'])
 | 
			
		||||
 | 
			
		||||
        for line in [':nova-compute-FORWARD - [0:0]',
 | 
			
		||||
                     ':nova-compute-INPUT - [0:0]',
 | 
			
		||||
                     ':nova-compute-local - [0:0]',
 | 
			
		||||
                     ':nova-compute-OUTPUT - [0:0]']:
 | 
			
		||||
            self.assertTrue(line in new_lines, "One of nova-compute's chains"
 | 
			
		||||
                                               " went missing.")
 | 
			
		||||
 | 
			
		||||
        seen_lines = set()
 | 
			
		||||
        for line in new_lines:
 | 
			
		||||
            line = line.strip()
 | 
			
		||||
            self.assertTrue(line not in seen_lines,
 | 
			
		||||
                            "Duplicate line: %s" % line)
 | 
			
		||||
            seen_lines.add(line)
 | 
			
		||||
 | 
			
		||||
        for chain in ['FORWARD', 'OUTPUT']:
 | 
			
		||||
            for line in new_lines:
 | 
			
		||||
                if line.startswith('-A %s' % chain):
 | 
			
		||||
                    self.assertTrue('-j nova-filter-top' in line,
 | 
			
		||||
                                    "First %s rule does not "
 | 
			
		||||
                                    "jump to nova-filter-top" % chain)
 | 
			
		||||
                    break
 | 
			
		||||
 | 
			
		||||
        self.assertTrue('-A nova-filter-top '
 | 
			
		||||
                        '-j run_tests.py-local' in new_lines,
 | 
			
		||||
                        "nova-filter-top does not jump to wrapped local chain")
 | 
			
		||||
 | 
			
		||||
        for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
 | 
			
		||||
            self.assertTrue('-A %s -j run_tests.py-%s' \
 | 
			
		||||
                            % (chain, chain) in new_lines,
 | 
			
		||||
                            "Built-in chain %s not wrapped" % (chain,))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
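The assertions above depend on IptablesManager's wrapping convention: rules added through the manager land in a chain named after the running binary ('run_tests.py-...' under the test runner, 'nova-compute-...' in the captured samples), and each built-in chain jumps to its wrapped twin. A simplified illustration of the naming only, assuming that convention; this is not the IptablesManager implementation:

import os
import sys


def wrapped_chain_name(chain):
    # e.g. 'FORWARD' -> 'run_tests.py-FORWARD' when invoked from run_tests.py
    binary = os.path.basename(sys.argv[0])
    return '%s-%s' % (binary, chain)


def wrap_rule(chain, rule):
    # ('FORWARD', '-s 1.2.3.4/5 -j DROP')
    #     -> '-A run_tests.py-FORWARD -s 1.2.3.4/5 -j DROP'
    return '-A %s %s' % (wrapped_chain_name(chain), rule)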
class NetworkTestCase(test.TestCase):
 | 
			
		||||
    """Test cases for network code"""
 | 
			
		||||
    def setUp(self):
 | 
			
		||||
@@ -321,6 +464,31 @@ class NetworkTestCase(test.TestCase):
 | 
			
		||||
                                                  network['id'])
 | 
			
		||||
        self.assertEqual(ip_count, num_available_ips)
 | 
			
		||||
 | 
			
		||||
    def test_dhcp_lease_output(self):
 | 
			
		||||
        admin_ctxt = context.get_admin_context()
 | 
			
		||||
        address = self._create_address(0, self.instance_id)
 | 
			
		||||
        lease_ip(address)
 | 
			
		||||
        network_ref = db.network_get_by_instance(admin_ctxt, self.instance_id)
 | 
			
		||||
        leases = linux_net.get_dhcp_leases(context.get_admin_context(),
 | 
			
		||||
                                           network_ref['id'])
 | 
			
		||||
        for line in leases.split('\n'):
 | 
			
		||||
            seconds, mac, ip, hostname, client_id = line.split(' ')
 | 
			
		||||
            self.assertTrue(int(seconds) > time.time(), 'Lease expires in '
 | 
			
		||||
                                                        'the past')
 | 
			
		||||
            octets = mac.split(':')
 | 
			
		||||
            self.assertEqual(len(octets), 6, "Wrong number of octets "
 | 
			
		||||
                                             "in %s" % (mac,))
 | 
			
		||||
            for octet in octets:
 | 
			
		||||
                self.assertEqual(len(octet), 2, "Oddly sized octet: %s"
 | 
			
		||||
                                                                    % (octet,))
 | 
			
		||||
                # This will throw an exception if the octet is invalid
 | 
			
		||||
                int(octet, 16)
 | 
			
		||||
 | 
			
		||||
            # And this will raise an exception in case of an invalid IP
 | 
			
		||||
            IPy.IP(ip)
 | 
			
		||||
 | 
			
		||||
        release_ip(address)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
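The parsing in test_dhcp_lease_output assumes dnsmasq-style lease lines of five space-separated fields: expiry time in epoch seconds, MAC address, IP address, hostname and client id. A small self-check against a fabricated line; the values are invented for illustration and are not taken from the fixtures:

import time

sample_line = '%d 02:16:3e:01:02:03 10.0.0.3 host-10-0-0-3 *' % (
    int(time.time()) + 120)
seconds, mac, ip, hostname, client_id = sample_line.split(' ')
assert int(seconds) > time.time()                # lease expires in the future
assert len(mac.split(':')) == 6                  # six two-digit hex octets
assert all(len(octet) == 2 for octet in mac.split(':'))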
def is_allocated_in_project(address, project_id):
 | 
			
		||||
    """Returns true if address is in specified project"""
 | 
			
		||||
 
 | 
			
		||||
@@ -17,6 +17,7 @@
 | 
			
		||||
import eventlet
 | 
			
		||||
import mox
 | 
			
		||||
import os
 | 
			
		||||
import re
 | 
			
		||||
import sys
 | 
			
		||||
 | 
			
		||||
from xml.etree.ElementTree import fromstring as xml_to_tree
 | 
			
		||||
@@ -521,16 +522,22 @@ class IptablesFirewallTestCase(test.TestCase):
 | 
			
		||||
        self.manager.delete_user(self.user)
 | 
			
		||||
        super(IptablesFirewallTestCase, self).tearDown()
 | 
			
		||||
 | 
			
		||||
    in_rules = [
 | 
			
		||||
    in_nat_rules = [
 | 
			
		||||
      '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
 | 
			
		||||
      '*nat',
 | 
			
		||||
      ':PREROUTING ACCEPT [1170:189210]',
 | 
			
		||||
      ':INPUT ACCEPT [844:71028]',
 | 
			
		||||
      ':OUTPUT ACCEPT [5149:405186]',
 | 
			
		||||
      ':POSTROUTING ACCEPT [5063:386098]',
 | 
			
		||||
    ]
 | 
			
		||||
 | 
			
		||||
    in_filter_rules = [
 | 
			
		||||
      '# Generated by iptables-save v1.4.4 on Mon Dec  6 11:54:13 2010',
 | 
			
		||||
      '*filter',
 | 
			
		||||
      ':INPUT ACCEPT [969615:281627771]',
 | 
			
		||||
      ':FORWARD ACCEPT [0:0]',
 | 
			
		||||
      ':OUTPUT ACCEPT [915599:63811649]',
 | 
			
		||||
      ':nova-block-ipv4 - [0:0]',
 | 
			
		||||
      '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
 | 
			
		||||
      '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
 | 
			
		||||
      '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
 | 
			
		||||
      '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
 | 
			
		||||
      '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
 | 
			
		||||
      ',ESTABLISHED -j ACCEPT ',
 | 
			
		||||
@@ -542,7 +549,7 @@ class IptablesFirewallTestCase(test.TestCase):
 | 
			
		||||
      '# Completed on Mon Dec  6 11:54:13 2010',
 | 
			
		||||
    ]
 | 
			
		||||
 | 
			
		||||
    in6_rules = [
 | 
			
		||||
    in6_filter_rules = [
 | 
			
		||||
      '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
 | 
			
		||||
      '*filter',
 | 
			
		||||
      ':INPUT ACCEPT [349155:75810423]',
 | 
			
		||||
@@ -605,21 +612,31 @@ class IptablesFirewallTestCase(test.TestCase):
 | 
			
		||||
        def fake_iptables_execute(*cmd, **kwargs):
 | 
			
		||||
            process_input = kwargs.get('process_input', None)
 | 
			
		||||
            if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
 | 
			
		||||
                return '\n'.join(self.in6_rules), None
 | 
			
		||||
                return '\n'.join(self.in6_filter_rules), None
 | 
			
		||||
            if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
 | 
			
		||||
                return '\n'.join(self.in_rules), None
 | 
			
		||||
                return '\n'.join(self.in_filter_rules), None
 | 
			
		||||
            if cmd == ('sudo', 'iptables-save', '-t', 'nat'):
 | 
			
		||||
                return '\n'.join(self.in_nat_rules), None
 | 
			
		||||
            if cmd == ('sudo', 'iptables-restore'):
 | 
			
		||||
                self.out_rules = process_input.split('\n')
 | 
			
		||||
                lines = process_input.split('\n')
 | 
			
		||||
                if '*filter' in lines:
 | 
			
		||||
                    self.out_rules = lines
 | 
			
		||||
                return '', ''
 | 
			
		||||
            if cmd == ('sudo', 'ip6tables-restore'):
 | 
			
		||||
                self.out6_rules = process_input.split('\n')
 | 
			
		||||
                lines = process_input.split('\n')
 | 
			
		||||
                if '*filter' in lines:
 | 
			
		||||
                    self.out6_rules = lines
 | 
			
		||||
                return '', ''
 | 
			
		||||
        self.fw.execute = fake_iptables_execute
 | 
			
		||||
            print cmd, kwargs
 | 
			
		||||
 | 
			
		||||
        from nova.network import linux_net
 | 
			
		||||
        linux_net.iptables_manager.execute = fake_iptables_execute
 | 
			
		||||
 | 
			
		||||
        self.fw.prepare_instance_filter(instance_ref)
 | 
			
		||||
        self.fw.apply_instance_filter(instance_ref)
 | 
			
		||||
 | 
			
		||||
        in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
 | 
			
		||||
        in_rules = filter(lambda l: not l.startswith('#'),
 | 
			
		||||
                          self.in_filter_rules)
 | 
			
		||||
        for rule in in_rules:
 | 
			
		||||
            if not 'nova' in rule:
 | 
			
		||||
                self.assertTrue(rule in self.out_rules,
 | 
			
		||||
@@ -642,17 +659,18 @@ class IptablesFirewallTestCase(test.TestCase):
 | 
			
		||||
        self.assertTrue(security_group_chain,
 | 
			
		||||
                        "The security group chain wasn't added")
 | 
			
		||||
 | 
			
		||||
        self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
 | 
			
		||||
                               security_group_chain in self.out_rules,
 | 
			
		||||
        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
 | 
			
		||||
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
 | 
			
		||||
                        "ICMP acceptance rule wasn't added")
 | 
			
		||||
 | 
			
		||||
        self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
 | 
			
		||||
                        '8 -j ACCEPT' % security_group_chain in self.out_rules,
 | 
			
		||||
        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
 | 
			
		||||
                           '--icmp-type 8 -j ACCEPT')
 | 
			
		||||
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
 | 
			
		||||
                        "ICMP Echo Request acceptance rule wasn't added")
 | 
			
		||||
 | 
			
		||||
        self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
 | 
			
		||||
                        '--dports 80:81 -j ACCEPT' % security_group_chain \
 | 
			
		||||
                            in self.out_rules,
 | 
			
		||||
        regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
 | 
			
		||||
                           '--dports 80:81 -j ACCEPT')
 | 
			
		||||
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
 | 
			
		||||
                        "TCP port 80/81 acceptance rule wasn't added")
 | 
			
		||||
        db.instance_destroy(admin_ctxt, instance_ref['id'])
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -18,6 +18,7 @@
 | 
			
		||||
Test suite for XenAPI
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
import functools
 | 
			
		||||
import stubout
 | 
			
		||||
 | 
			
		||||
from nova import db
 | 
			
		||||
@@ -41,6 +42,21 @@ from nova.tests.glance import stubs as glance_stubs
 | 
			
		||||
FLAGS = flags.FLAGS
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
 | 
			
		||||
    """
 | 
			
		||||
    vm_utils.with_vdi_attached_here needs to be stubbed out because it
 | 
			
		||||
    calls down to the filesystem to attach a vdi. This provides a
 | 
			
		||||
    decorator to handle that.
 | 
			
		||||
    """
 | 
			
		||||
    @functools.wraps(function)
 | 
			
		||||
    def decorated_function(self, *args, **kwargs):
 | 
			
		||||
        orig_with_vdi_attached_here = vm_utils.with_vdi_attached_here
 | 
			
		||||
        vm_utils.with_vdi_attached_here = lambda *x: should_return
 | 
			
		||||
        function(self, *args, **kwargs)
 | 
			
		||||
        vm_utils.with_vdi_attached_here = orig_with_vdi_attached_here
 | 
			
		||||
    return decorated_function
 | 
			
		||||
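One caveat with the decorator above: if the wrapped test raises, with_vdi_attached_here stays stubbed for later tests. A try/finally variant avoids that; this is only an illustrative alternative, not part of the change, and it assumes vm_utils and functools are imported as in the rest of this module:

def stub_vm_utils_with_vdi_attached_here_safe(function, should_return=True):
    @functools.wraps(function)
    def decorated_function(self, *args, **kwargs):
        orig = vm_utils.with_vdi_attached_here
        vm_utils.with_vdi_attached_here = lambda *x: should_return
        try:
            return function(self, *args, **kwargs)
        finally:
            # Restore the real helper even when the test fails.
            vm_utils.with_vdi_attached_here = orig
    return decorated_function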
 | 
			
		||||
 | 
			
		||||
class XenAPIVolumeTestCase(test.TestCase):
 | 
			
		||||
    """
 | 
			
		||||
    Unit tests for Volume operations
 | 
			
		||||
@@ -62,6 +78,7 @@ class XenAPIVolumeTestCase(test.TestCase):
 | 
			
		||||
                  'ramdisk_id': 3,
 | 
			
		||||
                  'instance_type': 'm1.large',
 | 
			
		||||
                  'mac_address': 'aa:bb:cc:dd:ee:ff',
 | 
			
		||||
                  'os_type': 'linux'
 | 
			
		||||
                  }
 | 
			
		||||
 | 
			
		||||
    def _create_volume(self, size='0'):
 | 
			
		||||
@@ -219,7 +236,7 @@ class XenAPIVMTestCase(test.TestCase):
 | 
			
		||||
 | 
			
		||||
        check()
 | 
			
		||||
 | 
			
		||||
    def check_vm_record(self, conn):
 | 
			
		||||
    def create_vm_record(self, conn, os_type):
 | 
			
		||||
        instances = conn.list_instances()
 | 
			
		||||
        self.assertEquals(instances, [1])
 | 
			
		||||
 | 
			
		||||
@@ -231,28 +248,63 @@ class XenAPIVMTestCase(test.TestCase):
 | 
			
		||||
               in xenapi_fake.get_all_records('VM').iteritems()
 | 
			
		||||
               if not rec['is_control_domain']]
 | 
			
		||||
        vm = vms[0]
 | 
			
		||||
        self.vm_info = vm_info
 | 
			
		||||
        self.vm = vm
 | 
			
		||||
 | 
			
		||||
    def check_vm_record(self, conn):
 | 
			
		||||
        # Check that m1.large above turned into the right thing.
 | 
			
		||||
        instance_type = db.instance_type_get_by_name(conn, 'm1.large')
 | 
			
		||||
        mem_kib = long(instance_type['memory_mb']) << 10
 | 
			
		||||
        mem_bytes = str(mem_kib << 10)
 | 
			
		||||
        vcpus = instance_type['vcpus']
 | 
			
		||||
        self.assertEquals(vm_info['max_mem'], mem_kib)
 | 
			
		||||
        self.assertEquals(vm_info['mem'], mem_kib)
 | 
			
		||||
        self.assertEquals(vm['memory_static_max'], mem_bytes)
 | 
			
		||||
        self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
 | 
			
		||||
        self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
 | 
			
		||||
        self.assertEquals(vm['VCPUs_max'], str(vcpus))
 | 
			
		||||
        self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
 | 
			
		||||
        self.assertEquals(self.vm_info['max_mem'], mem_kib)
 | 
			
		||||
        self.assertEquals(self.vm_info['mem'], mem_kib)
 | 
			
		||||
        self.assertEquals(self.vm['memory_static_max'], mem_bytes)
 | 
			
		||||
        self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
 | 
			
		||||
        self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
 | 
			
		||||
        self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
 | 
			
		||||
        self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
 | 
			
		||||
 | 
			
		||||
        # Check that the VM is running according to Nova
 | 
			
		||||
        self.assertEquals(vm_info['state'], power_state.RUNNING)
 | 
			
		||||
        self.assertEquals(self.vm_info['state'], power_state.RUNNING)
 | 
			
		||||
 | 
			
		||||
        # Check that the VM is running according to XenAPI.
 | 
			
		||||
        self.assertEquals(vm['power_state'], 'Running')
 | 
			
		||||
        self.assertEquals(self.vm['power_state'], 'Running')
 | 
			
		||||
 | 
			
		||||
    def check_vm_params_for_windows(self):
 | 
			
		||||
        self.assertEquals(self.vm['platform']['nx'], 'true')
 | 
			
		||||
        self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
 | 
			
		||||
        self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
 | 
			
		||||
 | 
			
		||||
        # check that these are not set
 | 
			
		||||
        self.assertEquals(self.vm['PV_args'], '')
 | 
			
		||||
        self.assertEquals(self.vm['PV_bootloader'], '')
 | 
			
		||||
        self.assertEquals(self.vm['PV_kernel'], '')
 | 
			
		||||
        self.assertEquals(self.vm['PV_ramdisk'], '')
 | 
			
		||||
 | 
			
		||||
    def check_vm_params_for_linux(self):
 | 
			
		||||
        self.assertEquals(self.vm['platform']['nx'], 'false')
 | 
			
		||||
        self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
 | 
			
		||||
        self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
 | 
			
		||||
 | 
			
		||||
        # check that these are not set
 | 
			
		||||
        self.assertEquals(self.vm['PV_kernel'], '')
 | 
			
		||||
        self.assertEquals(self.vm['PV_ramdisk'], '')
 | 
			
		||||
        self.assertEquals(self.vm['HVM_boot_params'], {})
 | 
			
		||||
        self.assertEquals(self.vm['HVM_boot_policy'], '')
 | 
			
		||||
 | 
			
		||||
    def check_vm_params_for_linux_with_external_kernel(self):
 | 
			
		||||
        self.assertEquals(self.vm['platform']['nx'], 'false')
 | 
			
		||||
        self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
 | 
			
		||||
        self.assertNotEquals(self.vm['PV_kernel'], '')
 | 
			
		||||
        self.assertNotEquals(self.vm['PV_ramdisk'], '')
 | 
			
		||||
 | 
			
		||||
        # check that these are not set
 | 
			
		||||
        self.assertEquals(self.vm['HVM_boot_params'], {})
 | 
			
		||||
        self.assertEquals(self.vm['HVM_boot_policy'], '')
 | 
			
		||||
 | 
			
		||||
    def _test_spawn(self, image_id, kernel_id, ramdisk_id,
 | 
			
		||||
                    instance_type="m1.large"):
 | 
			
		||||
                    instance_type="m1.large", os_type="linux"):
 | 
			
		||||
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
 | 
			
		||||
        values = {'name': 1,
 | 
			
		||||
                  'id': 1,
 | 
			
		||||
@@ -263,10 +315,12 @@ class XenAPIVMTestCase(test.TestCase):
 | 
			
		||||
                  'ramdisk_id': ramdisk_id,
 | 
			
		||||
                  'instance_type': instance_type,
 | 
			
		||||
                  'mac_address': 'aa:bb:cc:dd:ee:ff',
 | 
			
		||||
                  'os_type': os_type
 | 
			
		||||
                  }
 | 
			
		||||
        conn = xenapi_conn.get_connection(False)
 | 
			
		||||
        instance = db.instance_create(values)
 | 
			
		||||
        conn.spawn(instance)
 | 
			
		||||
        self.create_vm_record(conn, os_type)
 | 
			
		||||
        self.check_vm_record(conn)
 | 
			
		||||
 | 
			
		||||
    def test_spawn_not_enough_memory(self):
 | 
			
		||||
@@ -283,24 +337,37 @@ class XenAPIVMTestCase(test.TestCase):
 | 
			
		||||
        FLAGS.xenapi_image_service = 'objectstore'
 | 
			
		||||
        self._test_spawn(1, 2, 3)
 | 
			
		||||
 | 
			
		||||
    @stub_vm_utils_with_vdi_attached_here
 | 
			
		||||
    def test_spawn_raw_glance(self):
 | 
			
		||||
        FLAGS.xenapi_image_service = 'glance'
 | 
			
		||||
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
 | 
			
		||||
        self.check_vm_params_for_linux()
 | 
			
		||||
 | 
			
		||||
    def test_spawn_vhd_glance(self):
 | 
			
		||||
    def test_spawn_vhd_glance_linux(self):
 | 
			
		||||
        FLAGS.xenapi_image_service = 'glance'
 | 
			
		||||
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None)
 | 
			
		||||
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
 | 
			
		||||
                         os_type="linux")
 | 
			
		||||
        self.check_vm_params_for_linux()
 | 
			
		||||
 | 
			
		||||
    def test_spawn_vhd_glance_windows(self):
 | 
			
		||||
        FLAGS.xenapi_image_service = 'glance'
 | 
			
		||||
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
 | 
			
		||||
                         os_type="windows")
 | 
			
		||||
        self.check_vm_params_for_windows()
 | 
			
		||||
 | 
			
		||||
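The glance spawn tests above pin down how os_type is expected to shape the VM record: 'windows' guests boot HVM with NX enabled, 'linux' guests boot PV via pygrub. A restatement of those expectations as data, for illustration only; this is not the XenAPI helper code, just the values the check_vm_params_* assertions require:

def expected_vm_params(os_type):
    if os_type == 'windows':
        return {'platform': {'nx': 'true'},
                'HVM_boot_params': {'order': 'dc'},
                'HVM_boot_policy': 'BIOS order',
                'PV_args': '',
                'PV_bootloader': ''}
    return {'platform': {'nx': 'false'},
            'PV_args': 'clocksource=jiffies',
            'PV_bootloader': 'pygrub',
            'HVM_boot_params': {},
            'HVM_boot_policy': ''}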
    def test_spawn_glance(self):
 | 
			
		||||
        FLAGS.xenapi_image_service = 'glance'
 | 
			
		||||
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
 | 
			
		||||
                         glance_stubs.FakeGlance.IMAGE_KERNEL,
 | 
			
		||||
                         glance_stubs.FakeGlance.IMAGE_RAMDISK)
 | 
			
		||||
        self.check_vm_params_for_linux_with_external_kernel()
 | 
			
		||||
 | 
			
		||||
    def tearDown(self):
 | 
			
		||||
        super(XenAPIVMTestCase, self).tearDown()
 | 
			
		||||
        self.manager.delete_project(self.project)
 | 
			
		||||
        self.manager.delete_user(self.user)
 | 
			
		||||
        self.vm_info = None
 | 
			
		||||
        self.vm = None
 | 
			
		||||
        self.stubs.UnsetAll()
 | 
			
		||||
 | 
			
		||||
    def _create_instance(self):
 | 
			
		||||
@@ -314,7 +381,8 @@ class XenAPIVMTestCase(test.TestCase):
 | 
			
		||||
            'kernel_id': 2,
 | 
			
		||||
            'ramdisk_id': 3,
 | 
			
		||||
            'instance_type': 'm1.large',
 | 
			
		||||
            'mac_address': 'aa:bb:cc:dd:ee:ff'}
 | 
			
		||||
            'mac_address': 'aa:bb:cc:dd:ee:ff',
 | 
			
		||||
            'os_type': 'linux'}
 | 
			
		||||
        instance = db.instance_create(values)
 | 
			
		||||
        self.conn.spawn(instance)
 | 
			
		||||
        return instance
 | 
			
		||||
@@ -372,6 +440,7 @@ class XenAPIMigrateInstance(test.TestCase):
 | 
			
		||||
                  'ramdisk_id': None,
 | 
			
		||||
                  'instance_type': 'm1.large',
 | 
			
		||||
                  'mac_address': 'aa:bb:cc:dd:ee:ff',
 | 
			
		||||
                  'os_type': 'linux'
 | 
			
		||||
                  }
 | 
			
		||||
        stubs.stub_out_migration_methods(self.stubs)
 | 
			
		||||
        glance_stubs.stubout_glance_client(self.stubs,
 | 
			
		||||
@@ -410,6 +479,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
 | 
			
		||||
 | 
			
		||||
        self.fake_instance = FakeInstance()
 | 
			
		||||
        self.fake_instance.id = 42
 | 
			
		||||
        self.fake_instance.os_type = 'linux'
 | 
			
		||||
 | 
			
		||||
    def assert_disk_type(self, disk_type):
 | 
			
		||||
        dt = vm_utils.VMHelper.determine_disk_image_type(
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
nova/tests/test_zones.py (new file, 172 lines)
@@ -0,0 +1,172 @@
 | 
			
		||||
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
 | 
			
		||||
# All Rights Reserved.
 | 
			
		||||
#
 | 
			
		||||
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
 | 
			
		||||
#    not use this file except in compliance with the License. You may obtain
 | 
			
		||||
#    a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#         http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
#    Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 | 
			
		||||
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 | 
			
		||||
#    License for the specific language governing permissions and limitations
 | 
			
		||||
#    under the License.
 | 
			
		||||
"""
 | 
			
		||||
Tests For ZoneManager
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
import datetime
 | 
			
		||||
import mox
 | 
			
		||||
import novaclient
 | 
			
		||||
 | 
			
		||||
from nova import context
 | 
			
		||||
from nova import db
 | 
			
		||||
from nova import flags
 | 
			
		||||
from nova import service
 | 
			
		||||
from nova import test
 | 
			
		||||
from nova import rpc
 | 
			
		||||
from nova import utils
 | 
			
		||||
from nova.auth import manager as auth_manager
 | 
			
		||||
from nova.scheduler import zone_manager
 | 
			
		||||
 | 
			
		||||
FLAGS = flags.FLAGS
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class FakeZone:
 | 
			
		||||
    """Represents a fake zone from the db"""
 | 
			
		||||
    def __init__(self, *args, **kwargs):
 | 
			
		||||
        for k, v in kwargs.iteritems():
 | 
			
		||||
            setattr(self, k, v)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def exploding_novaclient(zone):
 | 
			
		||||
    """Used when we want to simulate a novaclient call failing."""
 | 
			
		||||
    raise Exception("kaboom")
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ZoneManagerTestCase(test.TestCase):
 | 
			
		||||
    """Test case for zone manager"""
 | 
			
		||||
    def test_ping(self):
 | 
			
		||||
        zm = zone_manager.ZoneManager()
 | 
			
		||||
        self.mox.StubOutWithMock(zm, '_refresh_from_db')
 | 
			
		||||
        self.mox.StubOutWithMock(zm, '_poll_zones')
 | 
			
		||||
        zm._refresh_from_db(mox.IgnoreArg())
 | 
			
		||||
        zm._poll_zones(mox.IgnoreArg())
 | 
			
		||||
 | 
			
		||||
        self.mox.ReplayAll()
 | 
			
		||||
        zm.ping(None)
 | 
			
		||||
        self.mox.VerifyAll()
 | 
			
		||||
 | 
			
		||||
    def test_refresh_from_db_new(self):
 | 
			
		||||
        zm = zone_manager.ZoneManager()
 | 
			
		||||
 | 
			
		||||
        self.mox.StubOutWithMock(db, 'zone_get_all')
 | 
			
		||||
        db.zone_get_all(mox.IgnoreArg()).AndReturn([
 | 
			
		||||
               FakeZone(id=1, api_url='http://foo.com', username='user1',
 | 
			
		||||
                    password='pass1'),
 | 
			
		||||
            ])
 | 
			
		||||
 | 
			
		||||
        self.assertEquals(len(zm.zone_states), 0)
 | 
			
		||||
 | 
			
		||||
        self.mox.ReplayAll()
 | 
			
		||||
        zm._refresh_from_db(None)
 | 
			
		||||
        self.mox.VerifyAll()
 | 
			
		||||
 | 
			
		||||
        self.assertEquals(len(zm.zone_states), 1)
 | 
			
		||||
        self.assertEquals(zm.zone_states[1].username, 'user1')
 | 
			
		||||
 | 
			
		||||
    def test_refresh_from_db_replace_existing(self):
 | 
			
		||||
        zm = zone_manager.ZoneManager()
 | 
			
		||||
        zone_state = zone_manager.ZoneState()
 | 
			
		||||
        zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
 | 
			
		||||
                        username='user1', password='pass1'))
 | 
			
		||||
        zm.zone_states[1] = zone_state
 | 
			
		||||
 | 
			
		||||
        self.mox.StubOutWithMock(db, 'zone_get_all')
 | 
			
		||||
        db.zone_get_all(mox.IgnoreArg()).AndReturn([
 | 
			
		||||
               FakeZone(id=1, api_url='http://foo.com', username='user2',
 | 
			
		||||
                    password='pass2'),
 | 
			
		||||
            ])
 | 
			
		||||
 | 
			
		||||
        self.assertEquals(len(zm.zone_states), 1)
 | 
			
		||||
 | 
			
		||||
        self.mox.ReplayAll()
 | 
			
		||||
        zm._refresh_from_db(None)
 | 
			
		||||
        self.mox.VerifyAll()
 | 
			
		||||
 | 
			
		||||
        self.assertEquals(len(zm.zone_states), 1)
 | 
			
		||||
        self.assertEquals(zm.zone_states[1].username, 'user2')
 | 
			
		||||
 | 
			
		||||
    def test_refresh_from_db_missing(self):
 | 
			
		||||
        zm = zone_manager.ZoneManager()
 | 
			
		||||
        zone_state = zone_manager.ZoneState()
 | 
			
		||||
        zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
 | 
			
		||||
                        username='user1', password='pass1'))
 | 
			
		||||
        zm.zone_states[1] = zone_state
 | 
			
		||||
 | 
			
		||||
        self.mox.StubOutWithMock(db, 'zone_get_all')
 | 
			
		||||
        db.zone_get_all(mox.IgnoreArg()).AndReturn([])
 | 
			
		||||
 | 
			
		||||
        self.assertEquals(len(zm.zone_states), 1)
 | 
			
		||||
 | 
			
		||||
        self.mox.ReplayAll()
 | 
			
		||||
        zm._refresh_from_db(None)
 | 
			
		||||
        self.mox.VerifyAll()
 | 
			
		||||
 | 
			
		||||
        self.assertEquals(len(zm.zone_states), 0)
 | 
			
		||||
 | 
			
		||||
    def test_refresh_from_db_add_and_delete(self):
 | 
			
		||||
        zm = zone_manager.ZoneManager()
 | 
			
		||||
        zone_state = zone_manager.ZoneState()
 | 
			
		||||
        zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
 | 
			
		||||
                        username='user1', password='pass1'))
 | 
			
		||||
        zm.zone_states[1] = zone_state
 | 
			
		||||
 | 
			
		||||
        self.mox.StubOutWithMock(db, 'zone_get_all')
 | 
			
		||||
 | 
			
		||||
        db.zone_get_all(mox.IgnoreArg()).AndReturn([
 | 
			
		||||
               FakeZone(id=2, api_url='http://foo.com', username='user2',
 | 
			
		||||
                    password='pass2'),
 | 
			
		||||
            ])
 | 
			
		||||
        self.assertEquals(len(zm.zone_states), 1)
 | 
			
		||||
 | 
			
		||||
        self.mox.ReplayAll()
 | 
			
		||||
        zm._refresh_from_db(None)
 | 
			
		||||
        self.mox.VerifyAll()
 | 
			
		||||
 | 
			
		||||
        self.assertEquals(len(zm.zone_states), 1)
 | 
			
		||||
        self.assertEquals(zm.zone_states[2].username, 'user2')
 | 
			
		||||
 | 
			
		||||
    def test_poll_zone(self):
 | 
			
		||||
        self.mox.StubOutWithMock(zone_manager, '_call_novaclient')
 | 
			
		||||
        zone_manager._call_novaclient(mox.IgnoreArg()).AndReturn(
 | 
			
		||||
                        dict(name='zohan', capabilities='hairdresser'))
 | 
			
		||||
 | 
			
		||||
        zone_state = zone_manager.ZoneState()
 | 
			
		||||
        zone_state.update_credentials(FakeZone(id=2,
 | 
			
		||||
                       api_url='http://foo.com', username='user2',
 | 
			
		||||
                       password='pass2'))
 | 
			
		||||
        zone_state.attempt = 1
 | 
			
		||||
 | 
			
		||||
        self.mox.ReplayAll()
 | 
			
		||||
        zone_manager._poll_zone(zone_state)
 | 
			
		||||
        self.mox.VerifyAll()
 | 
			
		||||
        self.assertEquals(zone_state.attempt, 0)
 | 
			
		||||
        self.assertEquals(zone_state.name, 'zohan')
 | 
			
		||||
 | 
			
		||||
    def test_poll_zone_fails(self):
 | 
			
		||||
        self.stubs.Set(zone_manager, "_call_novaclient", exploding_novaclient)
 | 
			
		||||
 | 
			
		||||
        zone_state = zone_manager.ZoneState()
 | 
			
		||||
        zone_state.update_credentials(FakeZone(id=2,
 | 
			
		||||
                       api_url='http://foo.com', username='user2',
 | 
			
		||||
                       password='pass2'))
 | 
			
		||||
        zone_state.attempt = FLAGS.zone_failures_to_offline - 1
 | 
			
		||||
 | 
			
		||||
        self.mox.ReplayAll()
 | 
			
		||||
        zone_manager._poll_zone(zone_state)
 | 
			
		||||
        self.mox.VerifyAll()
 | 
			
		||||
        self.assertEquals(zone_state.attempt, 3)
 | 
			
		||||
        self.assertFalse(zone_state.is_active)
 | 
			
		||||
        self.assertEquals(zone_state.name, None)
 | 
			
		||||
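Taken together, test_poll_zone and test_poll_zone_fails describe the retry bookkeeping expected of zone polling: a successful poll resets the attempt counter and records the zone's name, and once consecutive failures reach FLAGS.zone_failures_to_offline the zone goes inactive. A rough restatement of that behaviour; the threshold default and function name are illustrative, not the real zone_manager code:

def record_poll_result(zone_state, response, max_failures=3):
    if response is not None:
        # Successful poll: reset the failure count and store the capabilities.
        zone_state.attempt = 0
        zone_state.name = response.get('name')
        zone_state.is_active = True
    else:
        zone_state.attempt += 1
        if zone_state.attempt >= max_failures:
            # Too many consecutive failures: mark the child zone offline.
            zone_state.is_active = False
            zone_state.name = None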
@@ -139,34 +139,44 @@ def execute(*cmd, **kwargs):
 | 
			
		||||
    stdin = kwargs.get('stdin', subprocess.PIPE)
 | 
			
		||||
    stdout = kwargs.get('stdout', subprocess.PIPE)
 | 
			
		||||
    stderr = kwargs.get('stderr', subprocess.PIPE)
 | 
			
		||||
    attempts = kwargs.get('attempts', 1)
 | 
			
		||||
    cmd = map(str, cmd)
 | 
			
		||||
 | 
			
		||||
    LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd))
 | 
			
		||||
    env = os.environ.copy()
 | 
			
		||||
    if addl_env:
 | 
			
		||||
        env.update(addl_env)
 | 
			
		||||
    obj = subprocess.Popen(cmd, stdin=stdin,
 | 
			
		||||
            stdout=stdout, stderr=stderr, env=env)
 | 
			
		||||
    result = None
 | 
			
		||||
    if process_input != None:
 | 
			
		||||
        result = obj.communicate(process_input)
 | 
			
		||||
    else:
 | 
			
		||||
        result = obj.communicate()
 | 
			
		||||
    obj.stdin.close()
 | 
			
		||||
    if obj.returncode:
 | 
			
		||||
        LOG.debug(_("Result was %s") % obj.returncode)
 | 
			
		||||
        if type(check_exit_code) == types.IntType \
 | 
			
		||||
                and obj.returncode != check_exit_code:
 | 
			
		||||
            (stdout, stderr) = result
 | 
			
		||||
            raise ProcessExecutionError(exit_code=obj.returncode,
 | 
			
		||||
                                        stdout=stdout,
 | 
			
		||||
                                        stderr=stderr,
 | 
			
		||||
                                        cmd=' '.join(cmd))
 | 
			
		||||
    # NOTE(termie): this appears to be necessary to let the subprocess call
 | 
			
		||||
    #               clean something up in between calls, without it two
 | 
			
		||||
    #               execute calls in a row hangs the second one
 | 
			
		||||
    greenthread.sleep(0)
 | 
			
		||||
    return result
 | 
			
		||||
    while attempts > 0:
 | 
			
		||||
        attempts -= 1
 | 
			
		||||
        try:
 | 
			
		||||
            LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd))
 | 
			
		||||
            env = os.environ.copy()
 | 
			
		||||
            if addl_env:
 | 
			
		||||
                env.update(addl_env)
 | 
			
		||||
            obj = subprocess.Popen(cmd, stdin=stdin,
 | 
			
		||||
                    stdout=stdout, stderr=stderr, env=env)
 | 
			
		||||
            result = None
 | 
			
		||||
            if process_input != None:
 | 
			
		||||
                result = obj.communicate(process_input)
 | 
			
		||||
            else:
 | 
			
		||||
                result = obj.communicate()
 | 
			
		||||
            obj.stdin.close()
 | 
			
		||||
            if obj.returncode:
 | 
			
		||||
                LOG.debug(_("Result was %s") % obj.returncode)
 | 
			
		||||
                if type(check_exit_code) == types.IntType \
 | 
			
		||||
                        and obj.returncode != check_exit_code:
 | 
			
		||||
                    (stdout, stderr) = result
 | 
			
		||||
                    raise ProcessExecutionError(exit_code=obj.returncode,
 | 
			
		||||
                                                stdout=stdout,
 | 
			
		||||
                                                stderr=stderr,
 | 
			
		||||
                                                cmd=' '.join(cmd))
 | 
			
		||||
            # NOTE(termie): this appears to be necessary to let the subprocess
 | 
			
		||||
            #               call clean something up in between calls, without
 | 
			
		||||
            #               it two execute calls in a row hangs the second one
 | 
			
		||||
            greenthread.sleep(0)
 | 
			
		||||
            return result
 | 
			
		||||
        except ProcessExecutionError:
 | 
			
		||||
            if not attempts:
 | 
			
		||||
                raise
 | 
			
		||||
            else:
 | 
			
		||||
                LOG.debug(_("%r failed. Retrying."), cmd)
 | 
			
		||||
                greenthread.sleep(random.randint(20, 200) / 100.0)
 | 
			
		||||
 | 
			
		||||
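From a call site's perspective, the new attempts keyword lets commands that fail transiently be retried with a short randomized back-off instead of failing outright. A hedged usage sketch; the command and retry count below are examples, not taken from this change:

from nova import utils


def restore_rules(rules):
    # Retries up to five times on ProcessExecutionError, sleeping 0.2-2.0s
    # between attempts per the retry loop above.
    return utils.execute('sudo', 'iptables-restore',
                         process_input='\n'.join(rules),
                         attempts=5)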
 | 
			
		||||
def ssh_execute(ssh, cmd, process_input=None,
 | 
			
		||||
 
 | 
			
		||||
@@ -51,7 +51,7 @@ def extend(image, size):
        return
    utils.execute('truncate', '-s', size, image)
    # NOTE(vish): attempts to resize filesystem
    utils.execute('e2fsck', '-fp', mage, check_exit_code=False)
    utils.execute('e2fsck', '-fp', image, check_exit_code=False)
    utils.execute('resize2fs', image, check_exit_code=False)


@@ -189,4 +189,4 @@ def _inject_net_into_fs(net, fs):
    utils.execute('sudo', 'chown', 'root:root', netdir)
    utils.execute('sudo', 'chmod', 755, netdir)
    netfile = os.path.join(netdir, 'interfaces')
    utils.execute('sudo', 'tee', netfile, net)
    utils.execute('sudo', 'tee', netfile, process_input=net)

@@ -59,7 +59,6 @@ from nova import flags
from nova import log as logging
#from nova import test
from nova import utils
#from nova.api import context
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
@@ -479,7 +478,7 @@ class LibvirtConnection(object):
        console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                   'console.log')

        utils.execute('sudo', 'chown', s.getuid(), console_log)
        utils.execute('sudo', 'chown', os.getuid(), console_log)

        if FLAGS.libvirt_type == 'xen':
            # Xen is special
@@ -1591,10 +1590,14 @@ class NWFilterFirewall(FirewallDriver):

class IptablesFirewallDriver(FirewallDriver):
    def __init__(self, execute=None, **kwargs):
        self.execute = execute or utils.execute
        from nova.network import linux_net
        self.iptables = linux_net.iptables_manager
        self.instances = {}
        self.nwfilter = NWFilterFirewall(kwargs['get_connection'])

        self.iptables.ipv4['filter'].add_chain('sg-fallback')
        self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')

    def setup_basic_filtering(self, instance):
        """Use NWFilter from libvirt for this."""
        return self.nwfilter.setup_basic_filtering(instance)
@@ -1603,128 +1606,96 @@ class IptablesFirewallDriver(FirewallDriver):
        """No-op. Everything is done in prepare_instance_filter"""
        pass

    def remove_instance(self, instance):
        if instance['id'] in self.instances:
            del self.instances[instance['id']]
    def unfilter_instance(self, instance):
        if self.instances.pop(instance['id'], None):
            self.remove_filters_for_instance(instance)
            self.iptables.apply()
        else:
            LOG.info(_('Attempted to unfilter instance %s which is not '
                       'filtered'), instance['id'])

    def add_instance(self, instance):
        self.instances[instance['id']] = instance

    def unfilter_instance(self, instance):
        self.remove_instance(instance)
        self.apply_ruleset()
                     'filtered'), instance['id'])

    def prepare_instance_filter(self, instance):
        self.add_instance(instance)
        self.apply_ruleset()
        self.instances[instance['id']] = instance
        self.add_filters_for_instance(instance)
        self.iptables.apply()

    def apply_ruleset(self):
        current_filter, _ = self.execute('sudo', 'iptables-save',
                                         '-t', 'filter')
        current_lines = current_filter.split('\n')
        new_filter = self.modify_rules(current_lines, 4)
        self.execute('sudo', 'iptables-restore',
                     process_input='\n'.join(new_filter))
        if(FLAGS.use_ipv6):
            current_filter, _ = self.execute('sudo', 'ip6tables-save',
                                             '-t', 'filter')
            current_lines = current_filter.split('\n')
            new_filter = self.modify_rules(current_lines, 6)
            self.execute('sudo', 'ip6tables-restore',
                         process_input='\n'.join(new_filter))
    def add_filters_for_instance(self, instance):
        chain_name = self._instance_chain_name(instance)

    def modify_rules(self, current_lines, ip_version=4):
        self.iptables.ipv4['filter'].add_chain(chain_name)
        ipv4_address = self._ip_for_instance(instance)
        self.iptables.ipv4['filter'].add_rule('local',
                                              '-d %s -j $%s' %
                                              (ipv4_address, chain_name))

        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].add_chain(chain_name)
            ipv6_address = self._ip_for_instance_v6(instance)
            self.iptables.ipv6['filter'].add_rule('local',
                                                  '-d %s -j $%s' %
                                                  (ipv6_address,
                                                   chain_name))

        ipv4_rules, ipv6_rules = self.instance_rules(instance)

        for rule in ipv4_rules:
            self.iptables.ipv4['filter'].add_rule(chain_name, rule)

        if FLAGS.use_ipv6:
            for rule in ipv6_rules:
                self.iptables.ipv6['filter'].add_rule(chain_name, rule)

    def remove_filters_for_instance(self, instance):
        chain_name = self._instance_chain_name(instance)

        self.iptables.ipv4['filter'].remove_chain(chain_name)
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].remove_chain(chain_name)

    def instance_rules(self, instance):
        ctxt = context.get_admin_context()
        # Remove any trace of nova rules.
        new_filter = filter(lambda l: 'nova-' not in l, current_lines)

        seen_chains = False
        for rules_index in range(len(new_filter)):
            if not seen_chains:
                if new_filter[rules_index].startswith(':'):
                    seen_chains = True
            elif seen_chains == 1:
                if not new_filter[rules_index].startswith(':'):
                    break
        ipv4_rules = []
        ipv6_rules = []

        our_chains = [':nova-fallback - [0:0]']
        our_rules = ['-A nova-fallback -j DROP']
        # Always drop invalid packets
        ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
        ipv6_rules += ['-m state --state ' 'INVALID -j DROP']

        our_chains += [':nova-local - [0:0]']
        our_rules += ['-A FORWARD -j nova-local']
        our_rules += ['-A OUTPUT -j nova-local']
        # Allow established connections
        ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
        ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']

        security_groups = {}
        # Add our chains
        # First, we add instance chains and rules
        for instance_id in self.instances:
            instance = self.instances[instance_id]
            chain_name = self._instance_chain_name(instance)
            if(ip_version == 4):
                ip_address = self._ip_for_instance(instance)
            elif(ip_version == 6):
                ip_address = self._ip_for_instance_v6(instance)
        dhcp_server = self._dhcp_server_for_instance(instance)
        ipv4_rules += ['-s %s -p udp --sport 67 --dport 68 '
                       '-j ACCEPT' % (dhcp_server,)]

            our_chains += [':%s - [0:0]' % chain_name]
        #Allow project network traffic
        if FLAGS.allow_project_net_traffic:
            cidr = self._project_cidr_for_instance(instance)
            ipv4_rules += ['-s %s -j ACCEPT' % (cidr,)]

            # Jump to the per-instance chain
            our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
                                                         chain_name)]
        # We wrap these in FLAGS.use_ipv6 because they might cause
        # a DB lookup. The other ones are just list operations, so
        # they're not worth the clutter.
        if FLAGS.use_ipv6:
            # Allow RA responses
            ra_server = self._ra_server_for_instance(instance)
            if ra_server:
                ipv6_rules += ['-s %s/128 -p icmpv6 -j ACCEPT' % (ra_server,)]

            # Always drop invalid packets
            our_rules += ['-A %s -m state --state '
                          'INVALID -j DROP' % (chain_name,)]
            #Allow project network traffic
            if FLAGS.allow_project_net_traffic:
                cidrv6 = self._project_cidrv6_for_instance(instance)
                ipv6_rules += ['-s %s -j ACCEPT' % (cidrv6,)]

            # Allow established connections
            our_rules += ['-A %s -m state --state '
                          'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]

            # Jump to each security group chain in turn
            for security_group in \
                            db.security_group_get_by_instance(ctxt,
                                                              instance['id']):
                security_groups[security_group['id']] = security_group

                sg_chain_name = self._security_group_chain_name(
                                                          security_group['id'])

                our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]

            if(ip_version == 4):
                # Allow DHCP responses
                dhcp_server = self._dhcp_server_for_instance(instance)
                our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
                                    '-j ACCEPT ' % (chain_name, dhcp_server)]
                #Allow project network traffic
                if (FLAGS.allow_project_net_traffic):
                    cidr = self._project_cidr_for_instance(instance)
                    our_rules += ['-A %s -s %s -j ACCEPT' % (chain_name, cidr)]
            elif(ip_version == 6):
                # Allow RA responses
                ra_server = self._ra_server_for_instance(instance)
                if ra_server:
                    our_rules += ['-A %s -s %s -p icmpv6 -j ACCEPT' %
                                  (chain_name, ra_server + "/128")]
                #Allow project network traffic
                if (FLAGS.allow_project_net_traffic):
                    cidrv6 = self._project_cidrv6_for_instance(instance)
                    our_rules += ['-A %s -s %s -j ACCEPT' %
                                        (chain_name, cidrv6)]

            # If nothing matches, jump to the fallback chain
            our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
        security_groups = db.security_group_get_by_instance(ctxt,
                                                            instance['id'])

        # then, security group chains and rules
        for security_group_id in security_groups:
            chain_name = self._security_group_chain_name(security_group_id)
            our_chains += [':%s - [0:0]' % chain_name]

            rules = \
              db.security_group_rule_get_by_security_group(ctxt,
                                                          security_group_id)
        for security_group in security_groups:
            rules = db.security_group_rule_get_by_security_group(ctxt,
                                                          security_group['id'])

            for rule in rules:
                logging.info('%r', rule)
@@ -1735,14 +1706,16 @@ class IptablesFirewallDriver(FirewallDriver):
                    continue

                version = _get_ip_version(rule.cidr)
                if version != ip_version:
                    continue
                if version == 4:
                    rules = ipv4_rules
                else:
                    rules = ipv6_rules

                protocol = rule.protocol
                if version == 6 and rule.protocol == 'icmp':
                    protocol = 'icmpv6'

                args = ['-A', chain_name, '-p', protocol, '-s', rule.cidr]
                args = ['-p', protocol, '-s', rule.cidr]

                if rule.protocol in ['udp', 'tcp']:
                    if rule.from_port == rule.to_port:
@@ -1763,32 +1736,39 @@ class IptablesFirewallDriver(FirewallDriver):
                            icmp_type_arg += '/%s' % icmp_code

                    if icmp_type_arg:
                        if(ip_version == 4):
                        if version == 4:
                            args += ['-m', 'icmp', '--icmp-type',
                                     icmp_type_arg]
                        elif(ip_version == 6):
                        elif version == 6:
                            args += ['-m', 'icmp6', '--icmpv6-type',
                                     icmp_type_arg]

                args += ['-j ACCEPT']
                our_rules += [' '.join(args)]
                rules += [' '.join(args)]

        new_filter[rules_index:rules_index] = our_rules
        new_filter[rules_index:rules_index] = our_chains
        logging.info('new_filter: %s', '\n'.join(new_filter))
        return new_filter
        ipv4_rules += ['-j $sg-fallback']
        ipv6_rules += ['-j $sg-fallback']

        return ipv4_rules, ipv6_rules

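Editor's note: condensing the hunks above, the rewritten driver builds one iptables chain per instance, jumps traffic destined for the instance into that chain from the shared 'local' chain, and ends every per-instance rule list with a jump to the 'sg-fallback' DROP chain created in __init__. An illustrative sketch of how those calls compose (the function name and arguments are placeholders, not part of this commit; add_chain, add_rule and apply are the manager calls used above):

    def sketch_add_instance_filters(iptables, chain_name, ipv4_address, ipv4_rules):
        # one chain per instance
        iptables.ipv4['filter'].add_chain(chain_name)
        # traffic destined for the instance is jumped into its chain
        iptables.ipv4['filter'].add_rule('local',
                                         '-d %s -j $%s' % (ipv4_address, chain_name))
        # instance_rules() ends the list with a jump to the sg-fallback DROP chain
        for rule in ipv4_rules + ['-j $sg-fallback']:
            iptables.ipv4['filter'].add_rule(chain_name, rule)
        iptables.apply()
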
    def refresh_security_group_members(self, security_group):
        pass

    def refresh_security_group_rules(self, security_group):
        self.apply_ruleset()
        # We use the semaphore to make sure no one applies the rule set
        # after we've yanked the existing rules but before we've put in
        # the new ones.
        with self.iptables.semaphore:
            for instance in self.instances.values():
                self.remove_filters_for_instance(instance)
                self.add_filters_for_instance(instance)
        self.iptables.apply()

    def _security_group_chain_name(self, security_group_id):
        return 'nova-sg-%s' % (security_group_id,)

    def _instance_chain_name(self, instance):
        return 'nova-inst-%s' % (instance['id'],)
        return 'inst-%s' % (instance['id'],)

    def _ip_for_instance(self, instance):
        return db.instance_get_fixed_address(context.get_admin_context(),

@@ -41,9 +41,11 @@ from nova.virt.xenapi import HelperBase
from nova.virt.xenapi.volume_utils import StorageError


FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.virt.xenapi.vm_utils")

FLAGS = flags.FLAGS
flags.DEFINE_string('default_os_type', 'linux', 'Default OS type')

XENAPI_POWER_STATE = {
    'Halted': power_state.SHUTDOWN,
    'Running': power_state.RUNNING,
@@ -80,10 +82,19 @@ class VMHelper(HelperBase):
    """

    @classmethod
    def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
    def create_vm(cls, session, instance, kernel, ramdisk,
                  use_pv_kernel=False):
        """Create a VM record.  Returns a Deferred that gives the new
        VM reference.
        the pv_kernel flag indicates whether the guest is HVM or PV
        the use_pv_kernel flag indicates whether the guest is HVM or PV

        There are 3 scenarios:

            1. Using paravirtualization,  kernel passed in

            2. Using paravirtualization, kernel within the image

            3. Using hardware virtualization
        """

        instance_type = instance_types.\
@@ -91,52 +102,62 @@ class VMHelper(HelperBase):
        mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
        vcpus = str(instance_type['vcpus'])
        rec = {
            'name_label': instance.name,
            'name_description': '',
            'actions_after_crash': 'destroy',
            'actions_after_reboot': 'restart',
            'actions_after_shutdown': 'destroy',
            'affinity': '',
            'blocked_operations': {},
            'ha_always_run': False,
            'ha_restart_priority': '',
            'HVM_boot_params': {},
            'HVM_boot_policy': '',
            'is_a_template': False,
            'memory_static_min': '0',
            'memory_static_max': mem,
            'memory_dynamic_min': mem,
            'memory_dynamic_max': mem,
            'memory_static_min': '0',
            'memory_static_max': mem,
            'memory_target': mem,
            'name_description': '',
            'name_label': instance.name,
            'other_config': {'allowvssprovider': False},
            'other_config': {},
            'PCI_bus': '',
            'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
                         'viridian': 'true', 'timeoffset': '0'},
            'PV_args': '',
            'PV_bootloader': '',
            'PV_bootloader_args': '',
            'PV_kernel': '',
            'PV_legacy_args': '',
            'PV_ramdisk': '',
            'recommendations': '',
            'tags': [],
            'user_version': '0',
            'VCPUs_at_startup': vcpus,
            'VCPUs_max': vcpus,
            'VCPUs_params': {},
            'actions_after_shutdown': 'destroy',
            'actions_after_reboot': 'restart',
            'actions_after_crash': 'destroy',
            'PV_bootloader': '',
            'PV_kernel': '',
            'PV_ramdisk': '',
            'PV_args': '',
            'PV_bootloader_args': '',
            'PV_legacy_args': '',
            'HVM_boot_policy': '',
            'HVM_boot_params': {},
            'platform': {},
            'PCI_bus': '',
            'recommendations': '',
            'affinity': '',
            'user_version': '0',
            'other_config': {},
            'xenstore_data': {}
            }
        #Complete VM configuration record according to the image type
        #non-raw/raw with PV kernel/raw in HVM mode
        if instance.kernel_id:
            rec['PV_bootloader'] = ''
            rec['PV_kernel'] = kernel
            rec['PV_ramdisk'] = ramdisk
            rec['PV_args'] = 'root=/dev/xvda1'
            rec['PV_bootloader_args'] = ''
            rec['PV_legacy_args'] = ''
        else:
            if pv_kernel:
                rec['PV_args'] = 'noninteractive'
                rec['PV_bootloader'] = 'pygrub'

        # Complete VM configuration record according to the image type
        # non-raw/raw with PV kernel/raw in HVM mode
        if use_pv_kernel:
            rec['platform']['nx'] = 'false'
            if instance.kernel_id:
                # 1. Kernel explicitly passed in, use that
                rec['PV_args'] = 'root=/dev/xvda1'
                rec['PV_kernel'] = kernel
                rec['PV_ramdisk'] = ramdisk
            else:
                rec['HVM_boot_policy'] = 'BIOS order'
                rec['HVM_boot_params'] = {'order': 'dc'}
                rec['platform'] = {'acpi': 'true', 'apic': 'true',
                                   'pae': 'true', 'viridian': 'true'}
                # 2. Use kernel within the image
                rec['PV_args'] = 'clocksource=jiffies'
                rec['PV_bootloader'] = 'pygrub'
        else:
            # 3. Using hardware virtualization
            rec['platform']['nx'] = 'true'
            rec['HVM_boot_params'] = {'order': 'dc'}
            rec['HVM_boot_policy'] = 'BIOS order'

        LOG.debug(_('Created VM %s...'), instance.name)
        vm_ref = session.call_xenapi('VM.create', rec)
        instance_name = instance.name
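Editor's note: the rewritten create_vm() above now chooses one of three boot configurations from use_pv_kernel and instance.kernel_id. A condensed, illustrative summary of the fields it sets in each case (the helper name is made up; the values are copied from the hunk, with placeholder strings for the kernel and ramdisk references):

    def sketch_boot_config(use_pv_kernel, has_kernel_id):
        if use_pv_kernel and has_kernel_id:
            # 1. paravirtualized, kernel/ramdisk passed in explicitly
            return {'platform': {'nx': 'false'},
                    'PV_kernel': '<kernel>', 'PV_ramdisk': '<ramdisk>',
                    'PV_args': 'root=/dev/xvda1'}
        elif use_pv_kernel:
            # 2. paravirtualized, kernel lives inside the image, boot via pygrub
            return {'platform': {'nx': 'false'},
                    'PV_bootloader': 'pygrub', 'PV_args': 'clocksource=jiffies'}
        else:
            # 3. hardware virtualization: BIOS boot order, no PV kernel
            return {'platform': {'nx': 'true'},
                    'HVM_boot_policy': 'BIOS order',
                    'HVM_boot_params': {'order': 'dc'}}
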
@@ -181,13 +202,13 @@ class VMHelper(HelperBase):
    @classmethod
    def find_vbd_by_number(cls, session, vm_ref, number):
        """Get the VBD reference from the device number"""
        vbds = session.get_xenapi().VM.get_VBDs(vm_ref)
        if vbds:
            for vbd in vbds:
        vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
        if vbd_refs:
            for vbd_ref in vbd_refs:
                try:
                    vbd_rec = session.get_xenapi().VBD.get_record(vbd)
                    vbd_rec = session.get_xenapi().VBD.get_record(vbd_ref)
                    if vbd_rec['userdevice'] == str(number):
                        return vbd
                        return vbd_ref
                except cls.XenAPI.Failure, exc:
                    LOG.exception(exc)
        raise StorageError(_('VBD not found in instance %s') % vm_ref)
@@ -319,7 +340,7 @@ class VMHelper(HelperBase):
        return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)

    @classmethod
    def upload_image(cls, session, instance_id, vdi_uuids, image_id):
    def upload_image(cls, session, instance, vdi_uuids, image_id):
        """ Requests that the Glance plugin bundle the specified VDIs and
        push them into Glance using the specified human-friendly name.
        """
@@ -328,15 +349,18 @@ class VMHelper(HelperBase):
        logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
                " ID %(image_id)s") % locals())

        os_type = instance.os_type or FLAGS.default_os_type

        params = {'vdi_uuids': vdi_uuids,
                  'image_id': image_id,
                  'glance_host': FLAGS.glance_host,
                  'glance_port': FLAGS.glance_port,
                  'sr_path': cls.get_sr_path(session)}
                  'sr_path': cls.get_sr_path(session),
                  'os_type': os_type}

        kwargs = {'params': pickle.dumps(params)}
        task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
        session.wait_for_task(task, instance_id)
        session.wait_for_task(task, instance.id)

    @classmethod
    def fetch_image(cls, session, instance_id, image, user, project,
@@ -419,29 +443,29 @@ class VMHelper(HelperBase):
            vdi_size += MBR_SIZE_BYTES

        name_label = get_name_label_for_image(image)
        vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
        vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)

        with_vdi_attached_here(session, vdi, False,
        with_vdi_attached_here(session, vdi_ref, False,
                               lambda dev:
                               _stream_disk(dev, image_type,
                                            virtual_size, image_file))
        if image_type == ImageType.KERNEL_RAMDISK:
            #we need to invoke a plugin for copying VDI's
            #content into proper path
            LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
            LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
            fn = "copy_kernel_vdi"
            args = {}
            args['vdi-ref'] = vdi
            args['vdi-ref'] = vdi_ref
            #let the plugin copy the correct number of bytes
            args['image-size'] = str(vdi_size)
            task = session.async_call_plugin('glance', fn, args)
            filename = session.wait_for_task(task, instance_id)
            #remove the VDI as it is not needed anymore
            session.get_xenapi().VDI.destroy(vdi)
            LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
            session.get_xenapi().VDI.destroy(vdi_ref)
            LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
            return filename
        else:
            return session.get_xenapi().VDI.get_uuid(vdi)
            return session.get_xenapi().VDI.get_uuid(vdi_ref)

    @classmethod
    def determine_disk_image_type(cls, instance):
@@ -533,17 +557,33 @@ class VMHelper(HelperBase):
        return uuid

    @classmethod
    def lookup_image(cls, session, instance_id, vdi_ref):
    def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
                        os_type):
        """
        Determine if VDI is using a PV kernel
        Determine whether the VM will use a paravirtualized kernel or if it
        will use hardware virtualization.

            1. Objectstore (any image type):
               We use plugin to figure out whether the VDI uses PV

            2. Glance (VHD): then we use `os_type`, raise if not set

            3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
               available

            4. Glance (DISK): pv is assumed
        """
        if FLAGS.xenapi_image_service == 'glance':
            return cls._lookup_image_glance(session, vdi_ref)
            # 2, 3, 4: Glance
            return cls._determine_is_pv_glance(
              session, vdi_ref, disk_image_type, os_type)
        else:
            return cls._lookup_image_objectstore(session, instance_id, vdi_ref)
            # 1. Objectstore
            return cls._determine_is_pv_objectstore(session, instance_id,
                                                    vdi_ref)

    @classmethod
    def _lookup_image_objectstore(cls, session, instance_id, vdi_ref):
    def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
        LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
        fn = "is_vdi_pv"
        args = {}
@@ -559,42 +599,72 @@ class VMHelper(HelperBase):
        return pv

    @classmethod
    def _lookup_image_glance(cls, session, vdi_ref):
    def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type,
                                os_type):
        """
        For a Glance image, determine if we need paravirtualization.

        The relevant scenarios are:
            2. Glance (VHD): then we use `os_type`, raise if not set

            3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
               available

            4. Glance (DISK): pv is assumed
        """

        LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
        return with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
        if disk_image_type == ImageType.DISK_VHD:
            # 2. VHD
            if os_type == 'windows':
                is_pv = False
            else:
                is_pv = True
        elif disk_image_type == ImageType.DISK_RAW:
            # 3. RAW
            is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
        elif disk_image_type == ImageType.DISK:
            # 4. Disk
            is_pv = True
        else:
            raise exception.Error(_("Unknown image format %(disk_image_type)s")
                                  % locals())

        return is_pv

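Editor's note: pulling the two helpers above together, the PV/HVM decision reduces to a small table (summary only, not part of this commit; the objectstore case goes through the 'is_vdi_pv' dom0 plugin rather than a local check):

    # image source / type   -> paravirtualized?
    #   objectstore (any)   -> whatever the 'is_vdi_pv' plugin reports
    #   Glance DISK_VHD     -> False when os_type == 'windows', else True
    #   Glance DISK_RAW     -> probed with _is_vdi_pv on the attached VDI
    #   Glance DISK         -> True (PV assumed)
    #   anything else       -> exception.Error("Unknown image format ...")
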
    @classmethod
    def lookup(cls, session, i):
    def lookup(cls, session, name_label):
        """Look the instance i up, and returns it if available"""
        vms = session.get_xenapi().VM.get_by_name_label(i)
        n = len(vms)
        vm_refs = session.get_xenapi().VM.get_by_name_label(name_label)
        n = len(vm_refs)
        if n == 0:
            return None
        elif n > 1:
            raise exception.Duplicate(_('duplicate name found: %s') % i)
            raise exception.Duplicate(_('duplicate name found: %s') %
                                        name_label)
        else:
            return vms[0]
            return vm_refs[0]

    @classmethod
    def lookup_vm_vdis(cls, session, vm):
    def lookup_vm_vdis(cls, session, vm_ref):
        """Look for the VDIs that are attached to the VM"""
        # Firstly we get the VBDs, then the VDIs.
        # TODO(Armando): do we leave the read-only devices?
        vbds = session.get_xenapi().VM.get_VBDs(vm)
        vdis = []
        if vbds:
            for vbd in vbds:
        vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
        vdi_refs = []
        if vbd_refs:
            for vbd_ref in vbd_refs:
                try:
                    vdi = session.get_xenapi().VBD.get_VDI(vbd)
                    vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
                    # Test valid VDI
                    record = session.get_xenapi().VDI.get_record(vdi)
                    record = session.get_xenapi().VDI.get_record(vdi_ref)
                    LOG.debug(_('VDI %s is still available'), record['uuid'])
                except cls.XenAPI.Failure, exc:
                    LOG.exception(exc)
                else:
                    vdis.append(vdi)
            if len(vdis) > 0:
                return vdis
                    vdi_refs.append(vdi_ref)
            if len(vdi_refs) > 0:
                return vdi_refs
            else:
                return None

@@ -770,16 +840,16 @@ def safe_find_sr(session):
def find_sr(session):
    """Return the storage repository to hold VM images"""
    host = session.get_xenapi_host()
    srs = session.get_xenapi().SR.get_all()
    for sr in srs:
        sr_rec = session.get_xenapi().SR.get_record(sr)
    sr_refs = session.get_xenapi().SR.get_all()
    for sr_ref in sr_refs:
        sr_rec = session.get_xenapi().SR.get_record(sr_ref)
        if not ('i18n-key' in sr_rec['other_config'] and
                sr_rec['other_config']['i18n-key'] == 'local-storage'):
            continue
        for pbd in sr_rec['PBDs']:
            pbd_rec = session.get_xenapi().PBD.get_record(pbd)
        for pbd_ref in sr_rec['PBDs']:
            pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref)
            if pbd_rec['host'] == host:
                return sr
                return sr_ref
    return None


@@ -804,11 +874,11 @@ def remap_vbd_dev(dev):
    return remapped_dev


def with_vdi_attached_here(session, vdi, read_only, f):
def with_vdi_attached_here(session, vdi_ref, read_only, f):
    this_vm_ref = get_this_vm_ref(session)
    vbd_rec = {}
    vbd_rec['VM'] = this_vm_ref
    vbd_rec['VDI'] = vdi
    vbd_rec['VDI'] = vdi_ref
    vbd_rec['userdevice'] = 'autodetect'
    vbd_rec['bootable'] = False
    vbd_rec['mode'] = read_only and 'RO' or 'RW'
@@ -819,28 +889,28 @@ def with_vdi_attached_here(session, vdi, read_only, f):
    vbd_rec['qos_algorithm_type'] = ''
    vbd_rec['qos_algorithm_params'] = {}
    vbd_rec['qos_supported_algorithms'] = []
    LOG.debug(_('Creating VBD for VDI %s ... '), vdi)
    vbd = session.get_xenapi().VBD.create(vbd_rec)
    LOG.debug(_('Creating VBD for VDI %s done.'), vdi)
    LOG.debug(_('Creating VBD for VDI %s ... '), vdi_ref)
    vbd_ref = session.get_xenapi().VBD.create(vbd_rec)
    LOG.debug(_('Creating VBD for VDI %s done.'), vdi_ref)
    try:
        LOG.debug(_('Plugging VBD %s ... '), vbd)
        session.get_xenapi().VBD.plug(vbd)
        LOG.debug(_('Plugging VBD %s done.'), vbd)
        orig_dev = session.get_xenapi().VBD.get_device(vbd)
        LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals())
        LOG.debug(_('Plugging VBD %s ... '), vbd_ref)
        session.get_xenapi().VBD.plug(vbd_ref)
        LOG.debug(_('Plugging VBD %s done.'), vbd_ref)
        orig_dev = session.get_xenapi().VBD.get_device(vbd_ref)
        LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s') % locals())
        dev = remap_vbd_dev(orig_dev)
        if dev != orig_dev:
            LOG.debug(_('VBD %(vbd)s plugged into wrong dev, '
            LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, '
                        'remapping to %(dev)s') % locals())
        return f(dev)
    finally:
        LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
        vbd_unplug_with_retry(session, vbd)
        ignore_failure(session.get_xenapi().VBD.destroy, vbd)
        LOG.debug(_('Destroying VBD for VDI %s done.'), vdi)
        LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref)
        vbd_unplug_with_retry(session, vbd_ref)
        ignore_failure(session.get_xenapi().VBD.destroy, vbd_ref)
        LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref)


def vbd_unplug_with_retry(session, vbd):
def vbd_unplug_with_retry(session, vbd_ref):
    """Call VBD.unplug on the given VBD, with a retry if we get
    DEVICE_DETACH_REJECTED.  For reasons which I don't understand, we're
    seeing the device still in use, even when all processes using the device
@@ -848,7 +918,7 @@ def vbd_unplug_with_retry(session, vbd):
    # FIXME(sirp): We can use LoopingCall here w/o blocking sleep()
    while True:
        try:
            session.get_xenapi().VBD.unplug(vbd)
            session.get_xenapi().VBD.unplug(vbd_ref)
            LOG.debug(_('VBD.unplug successful first time.'))
            return
        except VMHelper.XenAPI.Failure, e:

@@ -55,12 +55,12 @@ class VMOps(object):

    def list_instances(self):
        """List VM instances"""
        vms = []
        for vm in self._session.get_xenapi().VM.get_all():
            rec = self._session.get_xenapi().VM.get_record(vm)
            if not rec["is_a_template"] and not rec["is_control_domain"]:
                vms.append(rec["name_label"])
        return vms
        vm_refs = []
        for vm_ref in self._session.get_xenapi().VM.get_all():
            vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
            if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]:
                vm_refs.append(vm_rec["name_label"])
        return vm_refs

    def _start(self, instance, vm_ref=None):
        """Power on a VM instance"""
@@ -87,8 +87,8 @@ class VMOps(object):
    def _spawn_with_disk(self, instance, vdi_uuid):
        """Create VM instance"""
        instance_name = instance.name
        vm = VMHelper.lookup(self._session, instance_name)
        if vm is not None:
        vm_ref = VMHelper.lookup(self._session, instance_name)
        if vm_ref is not None:
            raise exception.Duplicate(_('Attempted to create'
                    ' non-unique name %s') % instance_name)

@@ -104,31 +104,26 @@ class VMOps(object):
        user = AuthManager().get_user(instance.user_id)
        project = AuthManager().get_project(instance.project_id)

        kernel = ramdisk = pv_kernel = None

        # Are we building from a pre-existing disk?
        vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)

        disk_image_type = VMHelper.determine_disk_image_type(instance)
        if disk_image_type == ImageType.DISK_RAW:
            # Have a look at the VDI and see if it has a PV kernel
            pv_kernel = VMHelper.lookup_image(self._session, instance.id,
                                              vdi_ref)
        elif disk_image_type == ImageType.DISK_VHD:
            # TODO(sirp): Assuming PV for now; this will need to be
            # configurable as Windows will use HVM.
            pv_kernel = True

        kernel = None
        if instance.kernel_id:
            kernel = VMHelper.fetch_image(self._session, instance.id,
                instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)

        ramdisk = None
        if instance.ramdisk_id:
            ramdisk = VMHelper.fetch_image(self._session, instance.id,
                instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)

        vm_ref = VMHelper.create_vm(self._session,
                                          instance, kernel, ramdisk, pv_kernel)
        use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
            vdi_ref, disk_image_type, instance.os_type)
        vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
                                    use_pv_kernel)

        VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
                vdi_ref=vdi_ref, userdevice=0, bootable=True)

@@ -266,7 +261,7 @@ class VMOps(object):
            template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
            # call plugin to ship snapshot off to glance
            VMHelper.upload_image(
                    self._session, instance.id, template_vdi_uuids, image_id)
                    self._session, instance, template_vdi_uuids, image_id)
        finally:
            if template_vm_ref:
                self._destroy(instance, template_vm_ref,
@@ -371,8 +366,8 @@ class VMOps(object):

    def reboot(self, instance):
        """Reboot VM instance"""
        vm = self._get_vm_opaque_ref(instance)
        task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
        vm_ref = self._get_vm_opaque_ref(instance)
        task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref)
        self._session.wait_for_task(task, instance.id)

    def set_admin_password(self, instance, new_pass):
@@ -439,7 +434,7 @@ class VMOps(object):
            raise RuntimeError(resp_dict['message'])
        return resp_dict['message']

    def _shutdown(self, instance, vm, hard=True):
    def _shutdown(self, instance, vm_ref, hard=True):
        """Shutdown an instance"""
        state = self.get_info(instance['name'])['state']
        if state == power_state.SHUTDOWN:
@@ -453,31 +448,33 @@ class VMOps(object):
        try:
            task = None
            if hard:
                task = self._session.call_xenapi("Async.VM.hard_shutdown", vm)
                task = self._session.call_xenapi("Async.VM.hard_shutdown",
                                                 vm_ref)
            else:
                task = self._session.call_xenapi('Async.VM.clean_shutdown', vm)
                task = self._session.call_xenapi("Async.VM.clean_shutdown",
                                                 vm_ref)
            self._session.wait_for_task(task, instance.id)
        except self.XenAPI.Failure, exc:
            LOG.exception(exc)

    def _destroy_vdis(self, instance, vm):
        """Destroys all VDIs associated with a VM """
    def _destroy_vdis(self, instance, vm_ref):
        """Destroys all VDIs associated with a VM"""
        instance_id = instance.id
        LOG.debug(_("Destroying VDIs for Instance %(instance_id)s")
                  % locals())
        vdis = VMHelper.lookup_vm_vdis(self._session, vm)
        vdi_refs = VMHelper.lookup_vm_vdis(self._session, vm_ref)

        if not vdis:
        if not vdi_refs:
            return

        for vdi in vdis:
        for vdi_ref in vdi_refs:
            try:
                task = self._session.call_xenapi('Async.VDI.destroy', vdi)
                task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref)
                self._session.wait_for_task(task, instance.id)
            except self.XenAPI.Failure, exc:
                LOG.exception(exc)

    def _destroy_kernel_ramdisk(self, instance, vm):
    def _destroy_kernel_ramdisk(self, instance, vm_ref):
        """
        Three situations can occur:

@@ -504,8 +501,8 @@ class VMOps(object):
                  "both" % locals()))

        # 3. We have both kernel and ramdisk
        (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
            self._session, vm)
        (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
                                                           vm_ref)

        LOG.debug(_("Removing kernel/ramdisk files"))

@@ -516,11 +513,11 @@ class VMOps(object):

        LOG.debug(_("kernel/ramdisk files removed"))

    def _destroy_vm(self, instance, vm):
        """Destroys a VM record """
    def _destroy_vm(self, instance, vm_ref):
        """Destroys a VM record"""
        instance_id = instance.id
        try:
            task = self._session.call_xenapi('Async.VM.destroy', vm)
            task = self._session.call_xenapi('Async.VM.destroy', vm_ref)
            self._session.wait_for_task(task, instance_id)
        except self.XenAPI.Failure, exc:
            LOG.exception(exc)
@@ -536,10 +533,10 @@ class VMOps(object):
        """
        instance_id = instance.id
        LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
        vm = VMHelper.lookup(self._session, instance.name)
        return self._destroy(instance, vm, shutdown=True)
        vm_ref = VMHelper.lookup(self._session, instance.name)
        return self._destroy(instance, vm_ref, shutdown=True)

    def _destroy(self, instance, vm, shutdown=True,
    def _destroy(self, instance, vm_ref, shutdown=True,
                 destroy_kernel_ramdisk=True):
        """
        Destroys VM instance by performing:
@@ -549,17 +546,17 @@ class VMOps(object):
            3. Destroying kernel and ramdisk files (if necessary)
            4. Destroying that actual VM record
        """
        if vm is None:
        if vm_ref is None:
            LOG.warning(_("VM is not present, skipping destroy..."))
            return

        if shutdown:
            self._shutdown(instance, vm)
            self._shutdown(instance, vm_ref)

        self._destroy_vdis(instance, vm)
        self._destroy_vdis(instance, vm_ref)
        if destroy_kernel_ramdisk:
            self._destroy_kernel_ramdisk(instance, vm)
        self._destroy_vm(instance, vm)
            self._destroy_kernel_ramdisk(instance, vm_ref)
        self._destroy_vm(instance, vm_ref)

    def _wait_with_callback(self, instance_id, task, callback):
        ret = None
@@ -571,26 +568,27 @@ class VMOps(object):

    def pause(self, instance, callback):
        """Pause VM instance"""
        vm = self._get_vm_opaque_ref(instance)
        task = self._session.call_xenapi('Async.VM.pause', vm)
        vm_ref = self._get_vm_opaque_ref(instance)
        task = self._session.call_xenapi('Async.VM.pause', vm_ref)
        self._wait_with_callback(instance.id, task, callback)

    def unpause(self, instance, callback):
        """Unpause VM instance"""
        vm = self._get_vm_opaque_ref(instance)
        task = self._session.call_xenapi('Async.VM.unpause', vm)
        vm_ref = self._get_vm_opaque_ref(instance)
        task = self._session.call_xenapi('Async.VM.unpause', vm_ref)
        self._wait_with_callback(instance.id, task, callback)

    def suspend(self, instance, callback):
        """suspend the specified instance"""
        vm = self._get_vm_opaque_ref(instance)
        task = self._session.call_xenapi('Async.VM.suspend', vm)
        vm_ref = self._get_vm_opaque_ref(instance)
        task = self._session.call_xenapi('Async.VM.suspend', vm_ref)
        self._wait_with_callback(instance.id, task, callback)

    def resume(self, instance, callback):
        """resume the specified instance"""
        vm = self._get_vm_opaque_ref(instance)
        task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
        vm_ref = self._get_vm_opaque_ref(instance)
        task = self._session.call_xenapi('Async.VM.resume', vm_ref, False,
                                         True)
        self._wait_with_callback(instance.id, task, callback)

    def rescue(self, instance, callback):
@@ -600,29 +598,26 @@ class VMOps(object):
            - spawn a rescue VM (the vm name-label will be instance-N-rescue)

        """
        rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
        if rescue_vm:
        rescue_vm_ref = VMHelper.lookup(self._session,
                                        instance.name + "-rescue")
        if rescue_vm_ref:
            raise RuntimeError(_(
                "Instance is already in Rescue Mode: %s" % instance.name))

        vm = self._get_vm_opaque_ref(instance)
        self._shutdown(instance, vm)
        self._acquire_bootlock(vm)
        vm_ref = self._get_vm_opaque_ref(instance)
        self._shutdown(instance, vm_ref)
        self._acquire_bootlock(vm_ref)

        instance._rescue = True
        self.spawn(instance)
        rescue_vm = self._get_vm_opaque_ref(instance)
        rescue_vm_ref = self._get_vm_opaque_ref(instance)

        vbd = self._session.get_xenapi().VM.get_VBDs(vm)[0]
        vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"]
        vbd_ref = VMHelper.create_vbd(
            self._session,
            rescue_vm,
            vdi_ref,
            1,
            False)
        vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0]
        vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"]
        rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref,
                                             vdi_ref, 1, False)

        self._session.call_xenapi("Async.VBD.plug", vbd_ref)
        self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref)

    def unrescue(self, instance, callback):
 | 
			
		||||
        """Unrescue the specified instance
 | 
			
		||||
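
For readers following the rescue() hunk above: the added code shuts the original VM down, locks it so it cannot boot behind the rescue VM's back, spawns the instance-N-rescue VM, and then attaches the original root disk to that rescue VM as userdevice 1. An annotated restatement of the added lines (the comments are editorial; the calls are the ones shown in the hunk):

        vm_ref = self._get_vm_opaque_ref(instance)
        self._shutdown(instance, vm_ref)         # stop the original VM
        self._acquire_bootlock(vm_ref)           # keep it from restarting

        instance._rescue = True
        self.spawn(instance)                     # boots instance-N-rescue
        rescue_vm_ref = self._get_vm_opaque_ref(instance)

        # attach the original root VDI to the rescue VM as userdevice 1
        vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0]
        vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"]
        rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref,
                                             vdi_ref, 1, False)
        self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref)
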
@@ -631,51 +626,53 @@ class VMOps(object):
            - release the bootlock to allow the instance VM to start

        """
        rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
        rescue_vm_ref = VMHelper.lookup(self._session,
                                    instance.name + "-rescue")

        if not rescue_vm:
        if not rescue_vm_ref:
            raise exception.NotFound(_(
                "Instance is not in Rescue Mode: %s" % instance.name))

        original_vm = self._get_vm_opaque_ref(instance)
        vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm)
        original_vm_ref = self._get_vm_opaque_ref(instance)
        vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref)

        instance._rescue = False

        for vbd_ref in vbds:
            vbd = self._session.get_xenapi().VBD.get_record(vbd_ref)
            if vbd["userdevice"] == "1":
        for vbd_ref in vbd_refs:
            _vbd_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)
            if _vbd_ref["userdevice"] == "1":
                VMHelper.unplug_vbd(self._session, vbd_ref)
                VMHelper.destroy_vbd(self._session, vbd_ref)

        task1 = self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm)
        task1 = self._session.call_xenapi("Async.VM.hard_shutdown",
                                          rescue_vm_ref)
        self._session.wait_for_task(task1, instance.id)

        vdis = VMHelper.lookup_vm_vdis(self._session, rescue_vm)
        for vdi in vdis:
        vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
        for vdi_ref in vdi_refs:
            try:
                task = self._session.call_xenapi('Async.VDI.destroy', vdi)
                task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref)
                self._session.wait_for_task(task, instance.id)
            except self.XenAPI.Failure:
                continue

        task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm)
        task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm_ref)
        self._session.wait_for_task(task2, instance.id)

        self._release_bootlock(original_vm)
        self._start(instance, original_vm)
        self._release_bootlock(original_vm_ref)
        self._start(instance, original_vm_ref)

    def get_info(self, instance):
        """Return data about VM instance"""
        vm = self._get_vm_opaque_ref(instance)
        rec = self._session.get_xenapi().VM.get_record(vm)
        return VMHelper.compile_info(rec)
        vm_ref = self._get_vm_opaque_ref(instance)
        vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
        return VMHelper.compile_info(vm_rec)

    def get_diagnostics(self, instance):
        """Return data about VM diagnostics"""
        vm = self._get_vm_opaque_ref(instance)
        rec = self._session.get_xenapi().VM.get_record(vm)
        return VMHelper.compile_diagnostics(self._session, rec)
        vm_ref = self._get_vm_opaque_ref(instance)
        vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
        return VMHelper.compile_diagnostics(self._session, vm_rec)

    def get_console_output(self, instance):
        """Return snapshot of console"""
@@ -698,9 +695,9 @@ class VMOps(object):
        # at this stage even though they aren't implemented because these will
        # be needed for multi-nic and there was no sense writing it for single
        # network/single IP and then having to turn around and re-write it
        vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
        vm_ref = self._get_vm_opaque_ref(instance.id)
        logging.debug(_("injecting network info to xenstore for vm: |%s|"),
                                                             vm_opaque_ref)
                        vm_ref)
        admin_context = context.get_admin_context()
        IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
        networks = db.network_get_all_by_instance(admin_context,
@@ -731,11 +728,10 @@ class VMOps(object):
                'ips': [ip_dict(ip) for ip in network_IPs],
                'ip6s': [ip6_dict(ip) for ip in network_IPs]}

            self.write_to_param_xenstore(vm_opaque_ref, {location: mapping})
            self.write_to_param_xenstore(vm_ref, {location: mapping})

            try:
                self.write_to_xenstore(vm_opaque_ref, location,
                                                      mapping['location'])
                self.write_to_xenstore(vm_ref, location, mapping['location'])
            except KeyError:
                # catch KeyError for domid if instance isn't running
                pass
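
The network-injection hunk writes each network's mapping twice: once into the VM's xenstore parameter record, which persists with the VM record even while the domain is down, and once straight into the live domain's xenstore, which needs a running domain and therefore hits the KeyError guard when the instance is stopped. The added lines, annotated (comments are editorial):

            # persistent copy: stored on the VM record, available even when
            # the domain is not running
            self.write_to_param_xenstore(vm_ref, {location: mapping})

            try:
                # live copy: written into the running domain's xenstore
                self.write_to_xenstore(vm_ref, location, mapping['location'])
            except KeyError:
                # no domid for a stopped instance, so skip the live write
                pass
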
@@ -747,8 +743,8 @@ class VMOps(object):
        Creates vifs for an instance

        """
        vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
        logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref)
        vm_ref = self._get_vm_opaque_ref(instance.id)
        logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref)
        if networks is None:
            networks = db.network_get_all_by_instance(admin_context,
                                                      instance['id'])
@@ -768,12 +764,8 @@ class VMOps(object):
                except AttributeError:
                    device = "0"

                VMHelper.create_vif(
                    self._session,
                    vm_opaque_ref,
                    network_ref,
                    instance.mac_address,
                    device)
                VMHelper.create_vif(self._session, vm_ref, network_ref,
                                    instance.mac_address, device)

    def reset_network(self, instance):
        """
@@ -837,9 +829,9 @@ class VMOps(object):
        Any errors raised by the plugin will in turn raise a RuntimeError here.
        """
        instance_id = vm.id
        vm = self._get_vm_opaque_ref(vm)
        rec = self._session.get_xenapi().VM.get_record(vm)
        args = {'dom_id': rec['domid'], 'path': path}
        vm_ref = self._get_vm_opaque_ref(vm)
        vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
        args = {'dom_id': vm_rec['domid'], 'path': path}
        args.update(addl_args)
        try:
            task = self._session.async_call_plugin(plugin, method, args)
@@ -919,9 +911,9 @@ class VMOps(object):
        value for 'keys' is passed, the returned dict is filtered to only
        return the values for those keys.
        """
        vm = self._get_vm_opaque_ref(instance_or_vm)
        vm_ref = self._get_vm_opaque_ref(instance_or_vm)
        data = self._session.call_xenapi_request('VM.get_xenstore_data',
                (vm, ))
                (vm_ref, ))
        ret = {}
        if keys is None:
            keys = data.keys()
@@ -939,11 +931,11 @@ class VMOps(object):
        """Takes a key/value pair and adds it to the xenstore parameter
        record for the given vm instance. If the key exists in xenstore,
        it is overwritten"""
        vm = self._get_vm_opaque_ref(instance_or_vm)
        vm_ref = self._get_vm_opaque_ref(instance_or_vm)
        self.remove_from_param_xenstore(instance_or_vm, key)
        jsonval = json.dumps(val)
        self._session.call_xenapi_request('VM.add_to_xenstore_data',
                (vm, key, jsonval))
                                          (vm_ref, key, jsonval))

    def write_to_param_xenstore(self, instance_or_vm, mapping):
        """Takes a dict and writes each key/value pair to the xenstore
@@ -958,14 +950,14 @@ class VMOps(object):
        them from the xenstore parameter record data for the given VM.
        If the key doesn't exist, the request is ignored.
        """
        vm = self._get_vm_opaque_ref(instance_or_vm)
        vm_ref = self._get_vm_opaque_ref(instance_or_vm)
        if isinstance(key_or_keys, basestring):
            keys = [key_or_keys]
        else:
            keys = key_or_keys
        for key in keys:
            self._session.call_xenapi_request('VM.remove_from_xenstore_data',
                    (vm, key))
                                              (vm_ref, key))

    def clear_param_xenstore(self, instance_or_vm):
        """Removes all data from the xenstore parameter record for this VM."""
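
All of the xenstore helpers above funnel through the same call_xenapi_request entry point, passing the XenAPI method name plus its arguments as a tuple, with values JSON-encoded on the way in. A compact sketch of that round trip as two standalone functions (the wrapper names are invented for illustration, and the JSON decode on read is an assumption that mirrors the encode shown on write):

    import json

    def set_vm_xenstore_param(session, vm_ref, key, value):
        # remove-then-add, as add_to_param_xenstore does above, so an
        # existing key is effectively overwritten
        session.call_xenapi_request('VM.remove_from_xenstore_data',
                                    (vm_ref, key))
        session.call_xenapi_request('VM.add_to_xenstore_data',
                                    (vm_ref, key, json.dumps(value)))

    def get_vm_xenstore_params(session, vm_ref, keys=None):
        data = session.call_xenapi_request('VM.get_xenstore_data', (vm_ref, ))
        if keys is None:
            keys = data.keys()
        return dict((k, json.loads(data[k])) for k in keys if k in data)
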
@@ -117,16 +117,16 @@ class VolumeHelper(HelperBase):
    def introduce_vdi(cls, session, sr_ref):
        """Introduce VDI in the host"""
        try:
            vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
            vdi_refs = session.get_xenapi().SR.get_VDIs(sr_ref)
        except cls.XenAPI.Failure, exc:
            LOG.exception(exc)
            raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
        try:
            vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
            vdi_rec = session.get_xenapi().VDI.get_record(vdi_refs[0])
        except cls.XenAPI.Failure, exc:
            LOG.exception(exc)
            raise StorageError(_('Unable to get record'
                                 ' of VDI %s on') % vdis[0])
                                 ' of VDI %s on') % vdi_refs[0])
        else:
            try:
                return session.get_xenapi().VDI.introduce(

@@ -49,6 +49,12 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
                             address for the nova-volume host
:target_port:                iSCSI Target Port, 3260 Default
:iqn_prefix:                 IQN Prefix, e.g. 'iqn.2010-10.org.openstack'

**Variable Naming Scheme**

- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""

import sys
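
The naming scheme added to the module docstring above is what drives most of the renames in this commit (vm becomes vm_ref, rec becomes vm_rec, vdis becomes vdi_refs, and so on). A short illustration of the three suffixes, using calls that appear elsewhere in this diff (the name variable is hypothetical):

    vm_ref = session.get_xenapi().VM.get_by_name_label(name)[0]  # _ref: opaque reference
    vm_rec = session.get_xenapi().VM.get_record(vm_ref)          # _rec: record object (a dict)
    vm_uuid = vm_rec['uuid']                                     # _uuid: stable identifier
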
@@ -436,7 +436,8 @@ class Serializer(object):
        try:
            return handlers[content_type]
        except Exception:
            raise exception.InvalidContentType()
            raise exception.InvalidContentType(_("Invalid content type %s"
                                                 % content_type))

    def _from_json(self, datastring):
        return utils.loads(datastring)
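
The Serializer change only adds detail to the error message; the lookup itself is a plain dict dispatch keyed on content type, where a miss can only raise KeyError (the code above catches the broader Exception). A minimal standalone sketch of that dispatch shape (class and handler names here are illustrative, not the Serializer internals):

    import json

    class ContentTypeDispatcher(object):
        def __init__(self):
            self.handlers = {'application/json': json.loads}

        def get_handler(self, content_type):
            try:
                return self.handlers[content_type]
            except KeyError:
                raise ValueError("Invalid content type %s" % content_type)
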
@@ -54,6 +54,7 @@ def main(dom_id, command, only_this_vif=None):

def execute(*command, return_stdout=False):
    devnull = open(os.devnull, 'w')
    command = map(str, command)
    proc = subprocess.Popen(command, close_fds=True,
                            stdout=subprocess.PIPE, stderr=devnull)
    devnull.close()
@@ -71,13 +72,13 @@ def apply_iptables_rules(command, params):
    iptables = lambda *rule: execute('/sbin/iptables', *rule)

    iptables('-D', 'FORWARD', '-m', 'physdev',
             '--physdev-in', '%(VIF)s' % params,
             '-s', '%(IP)s' % params,
             '--physdev-in', params['VIF'],
             '-s', params['IP'],
             '-j', 'ACCEPT')
    if command == 'online':
        iptables('-A', 'FORWARD', '-m', 'physdev',
                 '--physdev-in', '%(VIF)s' % params,
                 '-s', '%(IP)s' % params,
                 '--physdev-in', params['VIF'],
                 '-s', params['IP'],
                 '-j', 'ACCEPT')


@@ -85,25 +86,24 @@ def apply_arptables_rules(command, params):
    arptables = lambda *rule: execute('/sbin/arptables', *rule)

    arptables('-D', 'FORWARD', '--opcode', 'Request',
              '--in-interface', '%(VIF)s' % params,
              '--source-ip', '%(IP)s' % params,
              '--source-mac', '%(MAC)s' % params,
              '--in-interface', params['VIF'],
              '--source-ip', params['IP'],
              '--source-mac', params['MAC'],
              '-j', 'ACCEPT')
    arptables('-D', 'FORWARD', '--opcode', 'Reply',
              '--in-interface', '%(VIF)s' % params,
              '--source-ip', '%(IP)s' % params,
              '--source-mac', '%(MAC)s' % params,
              '--in-interface', params['VIF'],
              '--source-ip', params['IP'],
              '--source-mac', params['MAC'],
              '-j', 'ACCEPT')
    if command == 'online':
        arptables('-A', 'FORWARD', '--opcode', 'Request',
                  '--in-interface', '%(VIF)s' % params
                  '--source-ip', '%(IP)s' % params,
                  '--source-mac', '%(MAC)s' % params,
                  '--in-interface', params['VIF'],
                  '--source-mac', params['MAC'],
                  '-j', 'ACCEPT')
        arptables('-A', 'FORWARD', '--opcode', 'Reply',
                  '--in-interface', '%(VIF)s' % params,
                  '--source-ip', '%(IP)s' % params,
                  '--source-mac', '%(MAC)s' % params,
                  '--in-interface', params['VIF'],
                  '--source-ip', params['IP'],
                  '--source-mac', params['MAC'],
                  '-j', 'ACCEPT')


@@ -130,7 +130,7 @@ def apply_ebtables_rules(command, params):
             '-i', params['VIF'], '-j', 'DROP')
    if command == 'online':
        ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'],
                 '-i', '%(VIF)s', '-j', 'DROP')
                 '-i', params['VIF'], '-j', 'DROP')


if __name__ == "__main__":
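
The change running through apply_iptables_rules, apply_arptables_rules and apply_ebtables_rules swaps '%(VIF)s' % params style string formatting for direct params['VIF'] lookups. Both forms yield the same string when the key is present, and a missing key raises KeyError either way; the direct lookup simply drops the needless formatting step and reads as data access rather than templating. A short comparison with hypothetical values:

    # hypothetical values, shaped like the VIF/IP/MAC params this script receives
    params = {'VIF': 'vif1.0', 'IP': '10.0.0.2', 'MAC': 'aa:bb:cc:dd:ee:ff'}

    arg_old = '%(VIF)s' % params    # old style: interpolate into a format string
    arg_new = params['VIF']         # new style: read the value directly
    assert arg_old == arg_new       # both evaluate to 'vif1.0'
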
@@ -191,7 +191,7 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
        os.link(source, link_name)


def _upload_tarball(staging_path, image_id, glance_host, glance_port):
def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type):
    """
    Create a tarball of the image and then stream that into Glance
    using chunked-transfer-encoded HTTP.
@@ -215,7 +215,10 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port):
        'x-image-meta-is-public': 'True',
        'x-image-meta-status': 'queued',
        'x-image-meta-disk-format': 'vhd',
        'x-image-meta-container-format': 'ovf'}
        'x-image-meta-container-format': 'ovf',
        'x-image-meta-property-os-type': os_type
    }

    for header, value in headers.iteritems():
        conn.putheader(header, value)
    conn.endheaders()
@@ -337,11 +340,13 @@ def upload_vhd(session, args):
    glance_host = params["glance_host"]
    glance_port = params["glance_port"]
    sr_path = params["sr_path"]
    os_type = params["os_type"]

    staging_path = _make_staging_area(sr_path)
    try:
        _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids)
        _upload_tarball(staging_path, image_id, glance_host, glance_port)
        _upload_tarball(staging_path, image_id, glance_host, glance_port,
                        os_type)
    finally:
        _cleanup_staging_area(staging_path)
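
The glance plugin changes thread a new os_type value from the upload_vhd arguments down into the image metadata: Glance's v1 API takes custom image properties as x-image-meta-property-* headers, so the OS type rides along as x-image-meta-property-os-type on the same chunked PUT. A sketch of how the header set is extended (this mirrors the visible part of the headers dict in _upload_tarball; connection handling is elided):

    def build_image_headers(os_type):
        # only the last entry is new in this commit
        return {
            'x-image-meta-is-public': 'True',
            'x-image-meta-status': 'queued',
            'x-image-meta-disk-format': 'vhd',
            'x-image-meta-container-format': 'ovf',
            'x-image-meta-property-os-type': os_type,
        }
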
@@ -10,6 +10,7 @@ boto==1.9b
carrot==0.10.5
eventlet==0.9.12
lockfile==0.8
python-novaclient==2.3
python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0