Merged with trunk

Commit 4fc0801c18 by Tushar Patil, 2011-08-15 11:47:30 -07:00
32 changed files with 895 additions and 133 deletions

@ -27,6 +27,7 @@ David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devendra Modium <dmodium@isi.edu>
Devin Carlen <devin.carlen@gmail.com>
Donal Lafferty <donal.lafferty@citrix.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <reldan@oscloud.ru>
Eric Day <eday@oddments.org>
@ -103,6 +104,7 @@ Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
Vivek Y S <vivek.ys@gmail.com>
Vladimir Popovski <vladimir@zadarastorage.com>
William Wolf <throughnothing@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>

bin/clear_rabbit_queues (new executable file, 73 lines)

@ -0,0 +1,73 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Admin/debug script to wipe rabbitMQ (AMQP) queues nova uses.
This can be used if you need to change durable options on queues,
or to wipe all messages in the queue system if things are in a
serious bad way.
"""
import datetime
import gettext
import os
import sys
import time
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
gettext.install('nova', unicode=1)
from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_boolean('delete_exchange', False, 'delete nova exchange too.')
def delete_exchange(exch):
conn = rpc.create_connection()
x = conn.get_channel()
x.exchange_delete(exch)
def delete_queues(queues):
conn = rpc.create_connection()
x = conn.get_channel()
for q in queues:
x.queue_delete(q)
if __name__ == '__main__':
utils.default_flagfile()
args = flags.FLAGS(sys.argv)
logging.setup()
delete_queues(args[1:])
if FLAGS.delete_exchange:
delete_exchange(FLAGS.control_exchange)

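For illustration, a minimal sketch of wiping a few queues with the same rpc calls the script above uses. The queue names are assumptions for demonstration (Nova services use '<topic>' and '<topic>.<host>' queues), and the sketch assumes flags have been parsed and a rabbit server is reachable, as in the script's __main__ block.

from nova import rpc

# Illustrative queue names only; adjust to the deployment's topics and hosts.
example_queues = ['compute', 'compute.host1', 'network', 'network.host1']

conn = rpc.create_connection()
channel = conn.get_channel()
for queue in example_queues:
    channel.queue_delete(queue)
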
@ -53,7 +53,7 @@ flags.DEFINE_string('dnsmasq_interface', 'br0', 'Default Dnsmasq interface')
LOG = logging.getLogger('nova.dhcpbridge')
def add_lease(mac, ip_address, _hostname, _interface):
def add_lease(mac, ip_address, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
LOG.debug(_("leasing ip"))
@ -67,13 +67,13 @@ def add_lease(mac, ip_address, _hostname, _interface):
"args": {"address": ip_address}})
def old_lease(mac, ip_address, hostname, interface):
def old_lease(mac, ip_address, interface):
"""Update just as add lease."""
LOG.debug(_("Adopted old lease or got a change of mac/hostname"))
add_lease(mac, ip_address, hostname, interface)
LOG.debug(_("Adopted old lease or got a change of mac"))
add_lease(mac, ip_address, interface)
def del_lease(mac, ip_address, _hostname, _interface):
def del_lease(mac, ip_address, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
LOG.debug(_("releasing ip"))
@ -115,11 +115,10 @@ def main():
if action in ['add', 'del', 'old']:
mac = argv[2]
ip = argv[3]
hostname = argv[4]
msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s and"
" hostname %(hostname)s on interface %(interface)s") % locals()
msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s"
" on interface %(interface)s") % locals()
LOG.debug(msg)
globals()[action + '_lease'](mac, ip, hostname, interface)
globals()[action + '_lease'](mac, ip, interface)
else:
print init_leases(interface)

@ -882,6 +882,14 @@ class ServiceCommands(object):
services = [s for s in services if s['host'] == host]
if service:
services = [s for s in services if s['binary'] == service]
print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
print print_format % (
_('Binary'),
_('Host'),
_('Zone'),
_('Status'),
_('State'),
_('Updated_At'))
for svc in services:
delta = now - (svc['updated_at'] or svc['created_at'])
alive = (delta.seconds <= 15)
@ -889,9 +897,9 @@ class ServiceCommands(object):
active = 'enabled'
if svc['disabled']:
active = 'disabled'
print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
active, art,
svc['updated_at'])
print print_format % (svc['binary'], svc['host'],
svc['availability_zone'], active, art,
svc['updated_at'])
@args('--host', dest='host', metavar='<host>', help='Host')
@args('--service', dest='service', metavar='<service>',

@ -355,6 +355,10 @@ class Executor(wsgi.Application):
LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex),
context=context)
return self._error(req, context, type(ex).__name__, unicode(ex))
except exception.InvalidPortRange as ex:
LOG.debug(_('InvalidPortRange raised: %s'), unicode(ex),
context=context)
return self._error(req, context, type(ex).__name__, unicode(ex))
except Exception as ex:
extra = {'environment': req.environ}
LOG.exception(_('Unexpected error raised: %s'), unicode(ex),

@ -169,13 +169,20 @@ def get_id_from_href(href):
Returns: 123
"""
if re.match(r'\d+$', str(href)):
LOG.debug(_("Attempting to treat %(href)s as an integer ID.") % locals())
try:
return int(href)
except ValueError:
pass
LOG.debug(_("Attempting to treat %(href)s as a URL.") % locals())
try:
return int(urlparse.urlsplit(href).path.split('/')[-1])
except ValueError, e:
LOG.debug(_("Error extracting id from href: %s") % href)
raise ValueError(_('could not parse id from href'))
except ValueError as error:
LOG.debug(_("Failed to parse ID from %(href)s: %(error)s") % locals())
raise
def remove_version_from_href(href):

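A condensed, standalone version of the two parsing paths added above (without the logging and translation calls); the example hrefs are purely illustrative:

import urlparse  # Python 2, matching the module above


def get_id_from_href(href):
    # First try to treat the href as a plain integer ID...
    try:
        return int(href)
    except ValueError:
        pass
    # ...otherwise take the last path segment of the URL; a bad segment
    # raises ValueError, as in the code above.
    return int(urlparse.urlsplit(href).path.split('/')[-1])

print get_id_from_href(123)                                  # 123
print get_id_from_href('http://localhost/v1.1/servers/123')  # 123
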
@ -104,12 +104,9 @@ class FloatingIPController(object):
ip = self.network_api.get_floating_ip(context, id)
if 'fixed_ip' in ip:
try:
self.disassociate(req, id, '')
except Exception as e:
LOG.exception(_("Error disassociating fixed_ip %s"), e)
self.disassociate(req, id)
self.network_api.release_floating_ip(context, address=ip)
self.network_api.release_floating_ip(context, address=ip['address'])
return {'released': {
"id": ip['id'],

@ -123,6 +123,7 @@ class CreateInstanceHelper(object):
zone_blob = server_dict.get('blob')
user_data = server_dict.get('user_data')
availability_zone = server_dict.get('availability_zone')
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
@ -164,6 +165,7 @@ class CreateInstanceHelper(object):
min_count=min_count,
max_count=max_count,
user_data=user_data))
availability_zone=availability_zone))
except quota.QuotaError as error:
self._handle_quota_error(error)
except exception.ImageNotFound as error:

@ -163,7 +163,7 @@ class Controller(object):
@scheduler_api.redirect_handler
def update(self, req, id, body):
""" Updates the server name or password """
"""Update server name then pass on to version-specific controller"""
if len(req.body) == 0:
raise exc.HTTPUnprocessableEntity()
@ -178,17 +178,15 @@ class Controller(object):
self.helper._validate_server_name(name)
update_dict['display_name'] = name.strip()
self._parse_update(ctxt, id, body, update_dict)
try:
self.compute_api.update(ctxt, id, **update_dict)
except exception.NotFound:
raise exc.HTTPNotFound()
return exc.HTTPNoContent()
return self._update(ctxt, req, id, body)
def _parse_update(self, context, id, inst_dict, update_dict):
pass
def _update(self, context, req, id, inst_dict):
return exc.HTTPNotImplemented()
@scheduler_api.redirect_handler
def action(self, req, id, body):
@ -210,11 +208,15 @@ class Controller(object):
}
self.actions.update(admin_actions)
for key in self.actions.keys():
if key in body:
for key in body:
if key in self.actions:
return self.actions[key](body, req, id)
else:
msg = _("There is no such server action: %s") % (key,)
raise exc.HTTPBadRequest(explanation=msg)
raise exc.HTTPNotImplemented()
msg = _("Invalid request body")
raise exc.HTTPBadRequest(explanation=msg)
def _action_create_backup(self, input_dict, req, instance_id):
"""Backup a server instance.
@ -568,10 +570,11 @@ class ControllerV10(Controller):
def _limit_items(self, items, req):
return common.limited(items, req)
def _parse_update(self, context, server_id, inst_dict, update_dict):
def _update(self, context, req, id, inst_dict):
if 'adminPass' in inst_dict['server']:
self.compute_api.set_admin_password(context, server_id,
self.compute_api.set_admin_password(context, id,
inst_dict['server']['adminPass'])
return exc.HTTPNoContent()
def _action_resize(self, input_dict, req, id):
""" Resizes a given instance to the flavor size requested """
@ -693,6 +696,10 @@ class ControllerV11(Controller):
LOG.info(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _update(self, context, req, id, inst_dict):
instance = self.compute_api.routing_get(context, id)
return self._build_view(req, instance, is_detail=True)
def _action_resize(self, input_dict, req, id):
""" Resizes a given instance to the flavor size requested """
try:

@ -393,10 +393,6 @@ class API(base.Base):
updates['hostname'] = self.hostname_factory(instance)
instance = self.update(context, instance_id, **updates)
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
return instance
def _ask_scheduler_to_create_instance(self, context, base_options,
@ -565,18 +561,20 @@ class API(base.Base):
{"method": "refresh_security_group_rules",
"args": {"security_group_id": security_group.id}})
def trigger_security_group_members_refresh(self, context, group_id):
def trigger_security_group_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for whom this is
relevant.
"""
# First, we get the security group rules that reference this group as
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = \
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id)
group_id))
# ..then we distill the security groups to which they belong..
security_groups = set()

@ -1139,7 +1139,10 @@ def instance_get_all(context):
session = get_session()
return session.query(models.Instance).\
options(joinedload_all('fixed_ips.floating_ips')).\
options(joinedload('virtual_interfaces')).\
options(joinedload_all('virtual_interfaces.network')).\
options(joinedload_all(
'virtual_interfaces.fixed_ips.floating_ips')).\
options(joinedload('virtual_interfaces.instance')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ips.network')).\
options(joinedload('metadata')).\
@ -1202,6 +1205,7 @@ def instance_get_all_by_filters(context, filters):
options(joinedload_all('virtual_interfaces.network')).\
options(joinedload_all(
'virtual_interfaces.fixed_ips.floating_ips')).\
options(joinedload('virtual_interfaces.instance')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ips.network')).\
options(joinedload('metadata')).\

@ -479,6 +479,11 @@ class SecurityGroupIngressRule(BASE, NovaBase):
# Note: This is not the parent SecurityGroup. It's SecurityGroup we're
# granting access for.
group_id = Column(Integer, ForeignKey('security_groups.id'))
grantee_group = relationship("SecurityGroup",
foreign_keys=group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == False)')
class ProviderFirewallRule(BASE, NovaBase):

@ -19,37 +19,79 @@
Session Handling for SQLAlchemy backend
"""
from sqlalchemy import create_engine
from sqlalchemy import pool
from sqlalchemy.orm import sessionmaker
import eventlet.patcher
eventlet.patcher.monkey_patch()
from nova import exception
from nova import flags
import eventlet.db_pool
import sqlalchemy.orm
import sqlalchemy.pool
import nova.exception
import nova.flags
import nova.log
FLAGS = nova.flags.FLAGS
LOG = nova.log.getLogger("nova.db.sqlalchemy")
try:
import MySQLdb
except ImportError:
MySQLdb = None
FLAGS = flags.FLAGS
_ENGINE = None
_MAKER = None
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session"""
global _ENGINE
global _MAKER
if not _MAKER:
if not _ENGINE:
kwargs = {'pool_recycle': FLAGS.sql_idle_timeout,
'echo': False}
"""Return a SQLAlchemy session."""
global _ENGINE, _MAKER
if FLAGS.sql_connection.startswith('sqlite'):
kwargs['poolclass'] = pool.NullPool
if _MAKER is None or _ENGINE is None:
_ENGINE = get_engine()
_MAKER = get_maker(_ENGINE, autocommit, expire_on_commit)
_ENGINE = create_engine(FLAGS.sql_connection,
**kwargs)
_MAKER = (sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit))
session = _MAKER()
session.query = exception.wrap_db_error(session.query)
session.flush = exception.wrap_db_error(session.flush)
session.query = nova.exception.wrap_db_error(session.query)
session.flush = nova.exception.wrap_db_error(session.flush)
return session
def get_engine():
"""Return a SQLAlchemy engine."""
connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection)
engine_args = {
"pool_recycle": FLAGS.sql_idle_timeout,
"echo": False,
}
if "sqlite" in connection_dict.drivername:
engine_args["poolclass"] = sqlalchemy.pool.NullPool
elif MySQLdb and "mysql" in connection_dict.drivername:
LOG.info(_("Using mysql/eventlet db_pool."))
pool_args = {
"db": connection_dict.database,
"passwd": connection_dict.password,
"host": connection_dict.host,
"user": connection_dict.username,
"min_size": FLAGS.sql_min_pool_size,
"max_size": FLAGS.sql_max_pool_size,
"max_idle": FLAGS.sql_idle_timeout,
}
creator = eventlet.db_pool.ConnectionPool(MySQLdb, **pool_args)
engine_args["pool_size"] = FLAGS.sql_max_pool_size
engine_args["pool_timeout"] = FLAGS.sql_pool_timeout
engine_args["creator"] = creator.create
return sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args)
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
autocommit=autocommit,
expire_on_commit=expire_on_commit)

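For reference, a small standalone illustration of the sqlalchemy.engine.url.make_url() parsing that the new get_engine() keys its pooling decision on; the connection strings below are example values, not defaults from this commit:

import sqlalchemy.engine.url

# MySQL-style URL -> the eventlet db_pool branch above would be taken.
url = sqlalchemy.engine.url.make_url('mysql://nova:secret@127.0.0.1/nova')
print url.drivername   # 'mysql'
print url.username     # 'nova'
print url.host         # '127.0.0.1'
print url.database     # 'nova'

# SQLite-style URL -> the NullPool branch above would be taken.
sqlite_url = sqlalchemy.engine.url.make_url('sqlite:///nova.sqlite')
print sqlite_url.drivername   # 'sqlite'
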
@ -305,6 +305,7 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues')
DEFINE_list('enabled_apis', ['ec2', 'osapi'],
'list of APIs to enable by default')
DEFINE_string('ec2_host', '$my_ip', 'ip of api server')
@ -345,6 +346,12 @@ DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory')
DEFINE_integer('logfile_mode', 0644, 'Default file mode of the logs.')
DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite')
DEFINE_integer('sql_pool_timeout', 30,
'seconds to wait for connection from pool before erroring')
DEFINE_integer('sql_min_pool_size', 10,
'minimum number of SQL connections to pool')
DEFINE_integer('sql_max_pool_size', 10,
'maximum number of SQL connections to pool')
DEFINE_string('sql_connection',
'sqlite:///$state_path/$sqlite_db',
'connection string for sql database')

@ -61,6 +61,7 @@ from nova import quota
from nova import utils
from nova import rpc
from nova.network import api as network_api
from nova.compute import api as compute_api
import random
@ -313,6 +314,7 @@ class NetworkManager(manager.SchedulerDependentManager):
network_driver = FLAGS.network_driver
self.driver = utils.import_object(network_driver)
self.network_api = network_api.API()
self.compute_api = compute_api.API()
super(NetworkManager, self).__init__(service_name='network',
*args, **kwargs)
@ -368,6 +370,15 @@ class NetworkManager(manager.SchedulerDependentManager):
self.host)
return host
def _do_trigger_security_group_members_refresh_for_instance(self,
instance_id):
admin_context = context.get_admin_context()
instance_ref = self.db.instance_get(admin_context, instance_id)
groups = instance_ref['security_groups']
group_ids = [group['id'] for group in groups]
self.compute_api.trigger_security_group_members_refresh(admin_context,
group_ids)
def _get_networks_for_instance(self, context, instance_id, project_id):
"""Determine & return which networks an instance should connect to."""
# TODO(tr3buchet) maybe this needs to be updated in the future if
@ -559,6 +570,8 @@ class NetworkManager(manager.SchedulerDependentManager):
address = self.db.fixed_ip_associate_pool(context.elevated(),
network['id'],
instance_id)
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
get_vif = self.db.virtual_interface_get_by_instance_and_network
vif = get_vif(context, instance_id, network['id'])
values = {'allocated': True,
@ -573,6 +586,11 @@ class NetworkManager(manager.SchedulerDependentManager):
self.db.fixed_ip_update(context, address,
{'allocated': False,
'virtual_interface_id': None})
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
instance_id = instance_ref['id']
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
@ -614,6 +632,64 @@ class NetworkManager(manager.SchedulerDependentManager):
network_ref = self.db.fixed_ip_get_network(context, address)
self._setup_network(context, network_ref)
def _validate_cidrs(self, context, cidr, num_networks, network_size):
significant_bits = 32 - int(math.log(network_size, 2))
req_net = netaddr.IPNetwork(cidr)
req_net_ip = str(req_net.ip)
req_size = network_size * num_networks
if req_size > req_net.size:
msg = _("network_size * num_networks exceeds cidr size")
raise ValueError(msg)
adjusted_cidr_str = req_net_ip + '/' + str(significant_bits)
adjusted_cidr = netaddr.IPNetwork(adjusted_cidr_str)
try:
used_nets = self.db.network_get_all(context)
except exception.NoNetworksFound:
used_nets = []
used_cidrs = [netaddr.IPNetwork(net['cidr']) for net in used_nets]
if adjusted_cidr in used_cidrs:
raise ValueError(_("cidr already in use"))
for adjusted_cidr_supernet in adjusted_cidr.supernet():
if adjusted_cidr_supernet in used_cidrs:
msg = _("requested cidr (%s) conflicts with existing supernet")
raise ValueError(msg % str(adjusted_cidr))
# watch for smaller subnets conflicting
used_supernets = []
for used_cidr in used_cidrs:
if not used_cidr:
continue
if used_cidr.size < network_size:
for ucsupernet in used_cidr.supernet():
if ucsupernet.size == network_size:
used_supernets.append(ucsupernet)
all_req_nets = []
if num_networks == 1:
if adjusted_cidr in used_supernets:
msg = _("requested cidr (%s) conflicts with existing smaller"
" cidr")
raise ValueError(msg % str(adjusted_cidr))
else:
all_req_nets.append(adjusted_cidr)
elif num_networks >= 2:
# split supernet into subnets
next_cidr = adjusted_cidr
for index in range(num_networks):
if next_cidr.first > req_net.last:
msg = _("Not enough subnets avail to satisfy requested "
"num_net works - some subnets in requested range"
" already in use")
raise ValueError(msg)
while True:
used_values = used_cidrs + used_supernets
if next_cidr in used_values:
next_cidr = next_cidr.next()
else:
all_req_nets.append(next_cidr)
next_cidr = next_cidr.next()
break
all_req_nets = sorted(list(set(all_req_nets)))
return all_req_nets
def create_networks(self, context, label, cidr, multi_host, num_networks,
network_size, cidr_v6, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None, **kwargs):
@ -624,8 +700,8 @@ class NetworkManager(manager.SchedulerDependentManager):
network_size_v6 = 1 << 64
if cidr:
fixed_net = netaddr.IPNetwork(cidr)
significant_bits = 32 - int(math.log(network_size, 2))
req_cidrs = self._validate_cidrs(context, cidr, num_networks,
network_size)
for index in range(num_networks):
net = {}
@ -635,9 +711,7 @@ class NetworkManager(manager.SchedulerDependentManager):
net['dns2'] = dns2
if cidr:
start = index * network_size
project_net = netaddr.IPNetwork('%s/%s' % (fixed_net[start],
significant_bits))
project_net = req_cidrs[index]
net['cidr'] = str(project_net)
net['multi_host'] = multi_host
net['netmask'] = str(project_net.netmask)
@ -857,7 +931,8 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
address = self.db.fixed_ip_associate_pool(context,
network['id'],
instance_id)
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
vif = self.db.virtual_interface_get_by_instance_and_network(context,
instance_id,
network['id'])

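A self-contained sketch of the netaddr arithmetic that _validate_cidrs builds on when splitting a supernet into fixed-size networks (no used networks assumed); the CIDR and sizes are example values mirroring the unit tests further down:

import math

import netaddr

cidr, num_networks, network_size = '192.168.0.0/16', 4, 256
significant_bits = 32 - int(math.log(network_size, 2))   # 24 for size 256

req_net = netaddr.IPNetwork(cidr)
next_cidr = netaddr.IPNetwork('%s/%s' % (req_net.ip, significant_bits))

subnets = []
for _ in range(num_networks):
    subnets.append(str(next_cidr))
    next_cidr = next_cidr.next()

print subnets
# ['192.168.0.0/24', '192.168.1.0/24', '192.168.2.0/24', '192.168.3.0/24']
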
@ -257,7 +257,7 @@ class TopicAdapterConsumer(AdapterConsumer):
self.queue = topic
self.routing_key = topic
self.exchange = FLAGS.control_exchange
self.durable = False
self.durable = FLAGS.rabbit_durable_queues
super(TopicAdapterConsumer, self).__init__(connection=connection,
topic=topic, proxy=proxy)
@ -345,7 +345,7 @@ class TopicPublisher(Publisher):
def __init__(self, connection=None, topic='broadcast'):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
self.durable = False
self.durable = FLAGS.rabbit_durable_queues
super(TopicPublisher, self).__init__(connection=connection)
@ -373,6 +373,7 @@ class DirectConsumer(Consumer):
self.queue = msg_id
self.routing_key = msg_id
self.exchange = msg_id
self.durable = False
self.auto_delete = True
self.exclusive = True
super(DirectConsumer, self).__init__(connection=connection)
@ -386,6 +387,7 @@ class DirectPublisher(Publisher):
def __init__(self, connection=None, msg_id=None):
self.routing_key = msg_id
self.exchange = msg_id
self.durable = False
self.auto_delete = True
super(DirectPublisher, self).__init__(connection=connection)
@ -573,7 +575,7 @@ def send_message(topic, message, wait=True):
publisher = messaging.Publisher(connection=Connection.instance(),
exchange=FLAGS.control_exchange,
durable=False,
durable=FLAGS.rabbit_durable_queues,
exchange_type='topic',
routing_key=topic)
publisher.send(message)

@ -34,12 +34,13 @@ from nova.scheduler import zone_manager
LOG = logging.getLogger('nova.scheduler.manager')
FLAGS = flags.FLAGS
flags.DEFINE_string('scheduler_driver',
'nova.scheduler.chance.ChanceScheduler',
'Driver to use for the scheduler')
'nova.scheduler.multi.MultiScheduler',
'Default driver to use for the scheduler')
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
def __init__(self, scheduler_driver=None, *args, **kwargs):
self.zone_manager = zone_manager.ZoneManager()
if not scheduler_driver:
@ -71,8 +72,8 @@ class SchedulerManager(manager.Manager):
def update_service_capabilities(self, context=None, service_name=None,
host=None, capabilities=None):
"""Process a capability update from a service node."""
if not capability:
capability = {}
if not capabilities:
capabilities = {}
self.zone_manager.update_service_capabilities(service_name,
host, capabilities)

nova/scheduler/multi.py (new file, 73 lines)

@ -0,0 +1,73 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler that allows routing some calls to one driver and others to another.
"""
from nova import flags
from nova import utils
from nova.scheduler import driver
FLAGS = flags.FLAGS
flags.DEFINE_string('compute_scheduler_driver',
'nova.scheduler.chance.ChanceScheduler',
'Driver to use for scheduling compute calls')
flags.DEFINE_string('volume_scheduler_driver',
'nova.scheduler.chance.ChanceScheduler',
'Driver to use for scheduling volume calls')
# A mapping of methods to topics so we can figure out which driver to use.
_METHOD_MAP = {'run_instance': 'compute',
'start_instance': 'compute',
'create_volume': 'volume'}
class MultiScheduler(driver.Scheduler):
"""A scheduler that holds multiple sub-schedulers.
This exists to allow flag-driven composibility of schedulers, allowing
third parties to integrate custom schedulers more easily.
"""
def __init__(self):
super(MultiScheduler, self).__init__()
compute_driver = utils.import_object(FLAGS.compute_scheduler_driver)
volume_driver = utils.import_object(FLAGS.volume_scheduler_driver)
self.drivers = {'compute': compute_driver,
'volume': volume_driver}
def __getattr__(self, key):
if not key.startswith('schedule_'):
raise AttributeError(key)
method = key[len('schedule_'):]
if method not in _METHOD_MAP:
raise AttributeError(key)
return getattr(self.drivers[_METHOD_MAP[method]], key)
def set_zone_manager(self, zone_manager):
for k, v in self.drivers.iteritems():
v.set_zone_manager(zone_manager)
def schedule(self, context, topic, *_args, **_kwargs):
return self.drivers[topic].schedule(context, topic, *_args, **_kwargs)

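A minimal, self-contained illustration of the __getattr__ dispatch in MultiScheduler above; the Fake* classes are stand-ins for real scheduler drivers and are not part of this commit:

_METHOD_MAP = {'run_instance': 'compute',
               'start_instance': 'compute',
               'create_volume': 'volume'}


class FakeComputeScheduler(object):
    def schedule_run_instance(self, context, *args, **kwargs):
        return 'picked-compute-host'


class FakeVolumeScheduler(object):
    def schedule_create_volume(self, context, *args, **kwargs):
        return 'picked-volume-host'


class FakeMultiScheduler(object):
    def __init__(self):
        self.drivers = {'compute': FakeComputeScheduler(),
                        'volume': FakeVolumeScheduler()}

    def __getattr__(self, key):
        # Only schedule_* methods known to the map are delegated.
        if not key.startswith('schedule_'):
            raise AttributeError(key)
        method = key[len('schedule_'):]
        if method not in _METHOD_MAP:
            raise AttributeError(key)
        return getattr(self.drivers[_METHOD_MAP[method]], key)


multi = FakeMultiScheduler()
print multi.schedule_run_instance(None)    # 'picked-compute-host'
print multi.schedule_create_volume(None)   # 'picked-volume-host'
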
@ -28,6 +28,7 @@ def fake_keypair(name):
'fingerprint': 'FAKE_FINGERPRINT',
'name': name}
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
@ -77,8 +78,21 @@ class KeypairsTest(test.TestCase):
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
def test_keypair_import(self):
body = {'keypair': {'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznAx9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6YQj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymiMZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7ljj5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGcj7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwBbHkXa6OciiJDvkRzJXzf'}}
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v1.1/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
@ -96,4 +110,3 @@ class KeypairsTest(test.TestCase):
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)

@ -249,6 +249,10 @@ class MiscFunctionsTest(test.TestCase):
common.get_id_from_href,
fixture)
def test_get_id_from_href_int(self):
fixture = 1
self.assertEqual(fixture, common.get_id_from_href(fixture))
def test_get_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = '1.1'

@ -281,7 +281,7 @@ class ActionExtensionTest(test.TestCase):
def test_invalid_action_body(self):
body = dict(blah=dict(name="test")) # Doesn't exist
response = self._send_server_action_request("/servers/1/action", body)
self.assertEqual(501, response.status_int)
self.assertEqual(400, response.status_int)
def test_invalid_action(self):
body = dict(blah=dict(name="test"))

@ -352,7 +352,7 @@ class ServerActionsTest(test.TestCase):
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(501, response.status_int)
self.assertEqual(400, response.status_int)
def test_create_backup_with_metadata(self):
self.flags(allow_admin_api=True)
@ -487,6 +487,24 @@ class ServerActionsTestV11(test.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
def test_server_bad_body(self):
body = {}
req = webob.Request.blank('/v1.1/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_unknown_action(self):
body = {'sockTheFox': {'fakekey': '1234'}}
req = webob.Request.blank('/v1.1/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_change_password(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)

@ -134,8 +134,8 @@ def return_security_group(context, instance_id, security_group_id):
pass
def instance_update(context, instance_id, kwargs):
return stub_instance(instance_id)
def instance_update(context, instance_id, values):
return stub_instance(instance_id, name=values.get('display_name'))
def instance_addresses(context, instance_id):
@ -145,7 +145,7 @@ def instance_addresses(context, instance_id):
def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
public_addresses=None, host=None, power_state=0,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", interfaces=None):
flavor_id="1", interfaces=None, name=None):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
@ -161,7 +161,7 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
host = str(host)
# ReservationID isn't sent back, hack it in there.
server_name = "server%s" % id
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
@ -1653,6 +1653,22 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_create_instance_v1_1_invalid_flavor_id_int(self):
self._setup_for_create_instance()
image_href = 'http://localhost/v1.1/images/2'
flavor_ref = -1
body = dict(server=dict(
name='server_test', imageRef=image_href, flavorRef=flavor_ref,
metadata={'hello': 'world', 'open': 'stack'},
personality={}))
req = webob.Request.blank('/v1.1/servers')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_create_instance_v1_1_bad_flavor_href(self):
self._setup_for_create_instance()
@ -1864,13 +1880,17 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 400)
def test_update_server_name_v1_1(self):
self.stubs.Set(nova.db.api, 'instance_get',
return_server_with_attributes(name='server_test'))
req = webob.Request.blank('/v1.1/servers/1')
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps({'server': {'name': 'new-name'}})
req.body = json.dumps({'server': {'name': 'server_test'}})
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 204)
self.assertEqual(res.body, '')
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_server_adminPass_ignored_v1_1(self):
inst_dict = dict(name='server_test', adminPass='bacon')
@ -1881,16 +1901,19 @@ class ServersTest(test.TestCase):
self.assertEqual(params, filtered_dict)
return filtered_dict
self.stubs.Set(nova.db.api, 'instance_update',
server_update)
self.stubs.Set(nova.db.api, 'instance_update', server_update)
self.stubs.Set(nova.db.api, 'instance_get',
return_server_with_attributes(name='server_test'))
req = webob.Request.blank('/v1.1/servers/1')
req.method = 'PUT'
req.content_type = "application/json"
req.body = self.body
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 204)
self.assertEqual(res.body, '')
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_create_backup_schedules(self):
req = webob.Request.blank('/v1.0/servers/1/backup_schedule')

@ -32,6 +32,7 @@ class FakeGlance(object):
IMAGE_RAMDISK = 3
IMAGE_RAW = 4
IMAGE_VHD = 5
IMAGE_ISO = 6
IMAGE_FIXTURES = {
IMAGE_MACHINE: {
@ -58,6 +59,11 @@ class FakeGlance(object):
'image_meta': {'name': 'fakevhd', 'size': 0,
'disk_format': 'vhd',
'container_format': 'ovf'},
'image_data': StringIO.StringIO('')},
IMAGE_ISO: {
'image_meta': {'name': 'fakeiso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare'},
'image_data': StringIO.StringIO('')}}
def __init__(self, host, port=None, use_ssl=False, auth_tok=None):

@ -36,8 +36,9 @@ from nova import test
from nova import rpc
from nova import utils
from nova.scheduler import api
from nova.scheduler import manager
from nova.scheduler import driver
from nova.scheduler import manager
from nova.scheduler import multi
from nova.compute import power_state
@ -391,7 +392,7 @@ class SimpleDriverTestCase(test.TestCase):
compute1.kill()
compute2.kill()
def test_wont_sechedule_if_specified_host_is_down_no_queue(self):
def test_wont_schedule_if_specified_host_is_down_no_queue(self):
compute1 = service.Service('host1',
'nova-compute',
'compute',
@ -903,6 +904,25 @@ class SimpleDriverTestCase(test.TestCase):
db.service_destroy(self.context, s_ref2['id'])
class MultiDriverTestCase(SimpleDriverTestCase):
"""Test case for multi driver."""
def setUp(self):
super(MultiDriverTestCase, self).setUp()
self.flags(connection_type='fake',
stub_network=True,
max_cores=4,
max_gigabytes=4,
network_manager='nova.network.manager.FlatManager',
volume_driver='nova.volume.driver.FakeISCSIDriver',
compute_scheduler_driver=('nova.scheduler.simple'
'.SimpleScheduler'),
volume_scheduler_driver=('nova.scheduler.simple'
'.SimpleScheduler'),
scheduler_driver='nova.scheduler.multi.MultiScheduler')
self.scheduler = manager.SchedulerManager()
class FakeZone(object):
def __init__(self, id, api_url, username, password):
self.id = id

@ -71,12 +71,12 @@ def _create_network_info(count=1, ipv6=None):
return [(network, mapping) for x in xrange(0, count)]
def _setup_networking(instance_id, ip='1.2.3.4'):
def _setup_networking(instance_id, ip='1.2.3.4', mac='56:12:12:12:12:12'):
ctxt = context.get_admin_context()
network_ref = db.project_get_networks(ctxt,
'fake',
associate=True)[0]
vif = {'address': '56:12:12:12:12:12',
vif = {'address': mac,
'network_id': network_ref['id'],
'instance_id': instance_id}
vif_ref = db.virtual_interface_create(ctxt, vif)
@ -884,7 +884,11 @@ class IptablesFirewallTestCase(test.TestCase):
def test_static_filters(self):
instance_ref = self._create_instance_ref()
_setup_networking(instance_ref['id'], self.test_ip)
src_instance_ref = self._create_instance_ref()
src_ip = '10.11.12.14'
src_mac = '56:12:12:12:12:13'
_setup_networking(instance_ref['id'], self.test_ip, src_mac)
_setup_networking(src_instance_ref['id'], src_ip)
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
@ -893,6 +897,12 @@ class IptablesFirewallTestCase(test.TestCase):
'name': 'testgroup',
'description': 'test group'})
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': 'fake',
'project_id': 'fake',
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
@ -914,9 +924,19 @@ class IptablesFirewallTestCase(test.TestCase):
'to_port': 81,
'cidr': '192.168.10.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['id'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['id'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
# self.fw.add_instance(instance_ref)
def fake_iptables_execute(*cmd, **kwargs):
@ -969,17 +989,22 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
'--icmp-type 8 -j ACCEPT')
regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8'
' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
'--dports 80:81 -j ACCEPT')
regex = re.compile('-A .* -j ACCEPT -p tcp -m multiport '
'--dports 80:81 -s %s' % (src_ip,))
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
regex = re.compile('-A .* -j ACCEPT -p tcp '
'-m multiport --dports 80:81 -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['id'])

@ -210,7 +210,11 @@ class VlanNetworkTestCase(test.TestCase):
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get')
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.1')
@ -247,6 +251,17 @@ class CommonNetworkTestCase(test.TestCase):
return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
dict(address='10.0.0.2')]
def network_get_by_cidr(self, context, cidr):
raise exception.NetworkNotFoundForCidr()
def network_create_safe(self, context, net):
fakenet = {}
fakenet['id'] = 999
return fakenet
def network_get_all(self, context):
raise exception.NoNetworksFound()
def __init__(self):
self.db = self.FakeDB()
self.deallocate_called = None
@ -254,6 +269,9 @@ class CommonNetworkTestCase(test.TestCase):
def deallocate_fixed_ip(self, context, address):
self.deallocate_called = address
def fake_create_fixed_ips(self, context, network_id):
return None
def test_remove_fixed_ip_from_instance(self):
manager = self.FakeNetworkManager()
manager.remove_fixed_ip_from_instance(None, 99, '10.0.0.1')
@ -265,3 +283,165 @@ class CommonNetworkTestCase(test.TestCase):
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
None, 99, 'bad input')
def test_validate_cidrs(self):
manager = self.FakeNetworkManager()
nets = manager._validate_cidrs(None, '192.168.0.0/24', 1, 256)
self.assertEqual(1, len(nets))
cidrs = [str(net) for net in nets]
self.assertTrue('192.168.0.0/24' in cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = self.FakeNetworkManager()
nets = manager._validate_cidrs(None, '192.168.0.0/24', 2, 128)
self.assertEqual(2, len(nets))
cidrs = [str(net) for net in nets]
self.assertTrue('192.168.0.0/25' in cidrs)
self.assertTrue('192.168.0.128/25' in cidrs)
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self):
manager = self.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/24'}])
self.mox.ReplayAll()
nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256)
self.assertEqual(4, len(nets))
cidrs = [str(net) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_smaller_subnet_in_use(self):
manager = self.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/25'}])
self.mox.ReplayAll()
# ValueError: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = [None, '192.168.2.0/24', 1, 256]
self.assertRaises(ValueError, manager._validate_cidrs, *args)
def test_validate_cidrs_split_smaller_cidr_in_use(self):
manager = self.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/25'}])
self.mox.ReplayAll()
nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256)
self.assertEqual(4, len(nets))
cidrs = [str(net) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_split_smaller_cidr_in_use2(self):
manager = self.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/29'}])
self.mox.ReplayAll()
nets = manager._validate_cidrs(None, '192.168.2.0/24', 3, 32)
self.assertEqual(3, len(nets))
cidrs = [str(net) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/27' in cidrs)
def test_validate_cidrs_split_all_in_use(self):
manager = self.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
in_use = [{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]
manager.db.network_get_all(ctxt).AndReturn(in_use)
self.mox.ReplayAll()
args = [None, '192.168.2.0/24', 3, 64]
# ValueError: Not enough subnets avail to satisfy requested num_
# networks - some subnets in requested range already
# in use
self.assertRaises(ValueError, manager._validate_cidrs, *args)
def test_validate_cidrs_one_in_use(self):
manager = self.FakeNetworkManager()
args = [None, '192.168.0.0/24', 2, 256]
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager._validate_cidrs, *args)
def test_validate_cidrs_already_used(self):
manager = self.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.0.0/24'}])
self.mox.ReplayAll()
# ValueError: cidr already in use
args = [None, '192.168.0.0/24', 1, 256]
self.assertRaises(ValueError, manager._validate_cidrs, *args)
def test_validate_cidrs_too_many(self):
manager = self.FakeNetworkManager()
args = [None, '192.168.0.0/24', 200, 256]
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager._validate_cidrs, *args)
def test_validate_cidrs_split_partial(self):
manager = self.FakeNetworkManager()
nets = manager._validate_cidrs(None, '192.168.0.0/16', 2, 256)
returned_cidrs = [str(net) for net in nets]
self.assertTrue('192.168.0.0/24' in returned_cidrs)
self.assertTrue('192.168.1.0/24' in returned_cidrs)
def test_validate_cidrs_conflict_existing_supernet(self):
manager = self.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = [None, '192.168.0.0/24', 1, 256]
# ValueError: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(ValueError, manager._validate_cidrs, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = self.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None,
None]
result = manager.create_networks(*args)
self.assertEqual(manager.create_networks(*args), None)
def test_create_networks_cidr_already_used(self):
manager = self.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = [None, 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None]
self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = self.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None,
None]
self.assertEqual(manager.create_networks(*args), None)

@ -519,6 +519,11 @@ class XenAPIVMTestCase(test.TestCase):
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,

@ -664,11 +664,10 @@ class IptablesFirewallDriver(FirewallDriver):
LOG.debug(_('Adding security group rule: %r'), rule)
if not rule.cidr:
# Eventually, a mechanism to grant access for security
# groups will turn up here. It'll use ipsets.
continue
version = 4
else:
version = netutils.get_ip_version(rule.cidr)
version = netutils.get_ip_version(rule.cidr)
if version == 4:
fw_rules = ipv4_rules
else:
@ -678,16 +677,16 @@ class IptablesFirewallDriver(FirewallDriver):
if version == 6 and rule.protocol == 'icmp':
protocol = 'icmpv6'
args = ['-p', protocol, '-s', rule.cidr]
args = ['-j ACCEPT', '-p', protocol]
if rule.protocol in ['udp', 'tcp']:
if protocol in ['udp', 'tcp']:
if rule.from_port == rule.to_port:
args += ['--dport', '%s' % (rule.from_port,)]
else:
args += ['-m', 'multiport',
'--dports', '%s:%s' % (rule.from_port,
rule.to_port)]
elif rule.protocol == 'icmp':
elif protocol == 'icmp':
icmp_type = rule.from_port
icmp_code = rule.to_port
@ -706,9 +705,22 @@ class IptablesFirewallDriver(FirewallDriver):
args += ['-m', 'icmp6', '--icmpv6-type',
icmp_type_arg]
args += ['-j ACCEPT']
fw_rules += [' '.join(args)]
if rule.cidr:
LOG.info('Using cidr %r', rule.cidr)
args += ['-s', rule.cidr]
fw_rules += [' '.join(args)]
else:
if rule['grantee_group']:
for instance in rule['grantee_group']['instances']:
LOG.info('instance: %r', instance)
ips = db.instance_get_fixed_addresses(ctxt,
instance['id'])
LOG.info('ips: %r', ips)
for ip in ips:
subrule = args + ['-s %s' % ip]
fw_rules += [' '.join(subrule)]
LOG.info('Using fw_rules: %r', fw_rules)
ipv4_rules += ['-j $sg-fallback']
ipv6_rules += ['-j $sg-fallback']
@ -719,7 +731,8 @@ class IptablesFirewallDriver(FirewallDriver):
return self.nwfilter.instance_filter_exists(instance)
def refresh_security_group_members(self, security_group):
pass
self.do_refresh_security_group_rules(security_group)
self.iptables.apply()
def refresh_security_group_rules(self, security_group, network_info=None):
self.do_refresh_security_group_rules(security_group, network_info)

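An illustrative sketch of how the grantee-group branch above expands one tcp rule into per-address iptables arguments, matching the shape the updated firewall tests earlier expect. The rule values and fixed IPs are made-up examples, and 'grantee_group_ips' is a hypothetical stand-in for the db lookup of the grantee group's instance addresses:

rule = {'protocol': 'tcp', 'from_port': 80, 'to_port': 81,
        'cidr': None,
        'grantee_group_ips': ['10.11.12.14', '10.11.12.15']}  # illustrative

args = ['-j ACCEPT', '-p', rule['protocol']]
if rule['from_port'] == rule['to_port']:
    args += ['--dport', '%s' % (rule['from_port'],)]
else:
    args += ['-m', 'multiport',
             '--dports', '%s:%s' % (rule['from_port'], rule['to_port'])]

# One '-s <ip>' rule per fixed address of the grantee group's instances.
fw_rules = [' '.join(args + ['-s %s' % ip])
            for ip in rule['grantee_group_ips']]
print fw_rules
# ['-j ACCEPT -p tcp -m multiport --dports 80:81 -s 10.11.12.14',
#  '-j ACCEPT -p tcp -m multiport --dports 80:81 -s 10.11.12.15']
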
@ -194,6 +194,7 @@ def create_local_pifs():
Do this one per host."""
for host_ref in _db_content['host'].keys():
_create_local_pif(host_ref)
_create_local_sr_iso(host_ref)
def create_local_srs():
@ -222,6 +223,25 @@ def _create_local_sr(host_ref):
return sr_ref
def _create_local_sr_iso(host_ref):
sr_ref = _create_object(
'SR',
{'name_label': 'Local storage ISO',
'type': 'lvm',
'content_type': 'iso',
'shared': False,
'physical_size': str(1 << 30),
'physical_utilisation': str(0),
'virtual_allocation': str(0),
'other_config': {
'i18n-original-value-name_label': 'Local storage ISO',
'i18n-key': 'local-storage-iso'},
'VDIs': []})
pbd_ref = create_pbd('', host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
return sr_ref
def _create_local_pif(host_ref):
pif_ref = _create_object('PIF',
{'name-label': 'Fake PIF',

@ -77,6 +77,7 @@ class ImageType:
3 - raw disk image (local SR, NOT partitioned by plugin)
4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
linux, HVM assumed for Windows)
5 - ISO disk image (local SR, NOT partitioned by plugin)
"""
KERNEL = 0
@ -84,14 +85,17 @@ class ImageType:
DISK = 2
DISK_RAW = 3
DISK_VHD = 4
_ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD)
DISK_ISO = 5
_ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
DISK_STR = "os"
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR)
DISK_ISO_STR = "iso"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
DISK_ISO_STR)
@classmethod
def to_string(cls, image_type):
@ -222,6 +226,30 @@ class VMHelper(HelperBase):
' VDI %(vdi_ref)s.') % locals())
return vbd_ref
@classmethod
def create_cd_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference specific to CDRom devices."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
vbd_rec['bootable'] = bootable
vbd_rec['mode'] = 'RO'
vbd_rec['type'] = 'CD'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
LOG.debug(_('Creating a CDROM-specific VBD for VM %(vm_ref)s,'
' VDI %(vdi_ref)s ... ') % locals())
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
LOG.debug(_('Created a CDROM-specific VBD %(vbd_ref)s '
' for VM %(vm_ref)s, VDI %(vdi_ref)s.') % locals())
return vbd_ref
@classmethod
def find_vbd_by_number(cls, session, vm_ref, number):
"""Get the VBD reference from the device number"""
@ -367,6 +395,23 @@ class VMHelper(HelperBase):
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
session.wait_for_task(task, instance.id)
@classmethod
def fetch_blank_disk(cls, session, instance_type_id):
# Size the blank harddrive to suit the machine type:
one_gig = 1024 * 1024 * 1024
req_type = instance_types.get_instance_type(instance_type_id)
req_size = req_type['local_gb']
LOG.debug("Creating blank HD of size %(req_size)d gigs"
% locals())
vdi_size = one_gig * req_size
LOG.debug("ISO vm create: Looking for the SR")
sr_ref = safe_find_sr(session)
vdi_ref = cls.create_vdi(session, sr_ref, 'blank HD', vdi_size, False)
return vdi_ref
@classmethod
def fetch_image(cls, context, session, instance_id, image, user_id,
project_id, image_type):
@ -449,7 +494,12 @@ class VMHelper(HelperBase):
# DISK restores
LOG.debug(_("Fetching image %(image)s") % locals())
LOG.debug(_("Image Type: %s"), ImageType.to_string(image_type))
sr_ref = safe_find_sr(session)
if image_type == ImageType.DISK_ISO:
sr_ref = safe_find_iso_sr(session)
LOG.debug(_("ISO: Found sr possibly containing the ISO image"))
else:
sr_ref = safe_find_sr(session)
glance_client, image_id = nova.image.get_glance_client(image)
glance_client.set_auth_token(getattr(context, 'auth_token', None))
@ -527,7 +577,8 @@ class VMHelper(HelperBase):
ImageType.RAMDISK: 'RAMDISK',
ImageType.DISK: 'DISK',
ImageType.DISK_RAW: 'DISK_RAW',
ImageType.DISK_VHD: 'DISK_VHD'}
ImageType.DISK_VHD: 'DISK_VHD',
ImageType.DISK_ISO: 'DISK_ISO'}
disk_format = pretty_format[image_type]
image_ref = instance.image_ref
instance_id = instance.id
@ -540,7 +591,8 @@ class VMHelper(HelperBase):
'aki': ImageType.KERNEL,
'ari': ImageType.RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD}
'vhd': ImageType.DISK_VHD,
'iso': ImageType.DISK_ISO}
image_ref = instance.image_ref
glance_client, image_id = nova.image.get_glance_client(image_ref)
meta = glance_client.get_image_meta(image_id)
@ -574,6 +626,8 @@ class VMHelper(HelperBase):
available
3. Glance (DISK): pv is assumed
4. Glance (DISK_ISO): no pv is assumed
"""
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
@ -589,6 +643,9 @@ class VMHelper(HelperBase):
elif disk_image_type == ImageType.DISK:
# 3. Disk
is_pv = True
elif disk_image_type == ImageType.DISK_ISO:
# 4. ISO
is_pv = False
else:
raise exception.Error(_("Unknown image format %(disk_image_type)s")
% locals())
@ -832,6 +889,48 @@ def find_sr(session):
return None
def safe_find_iso_sr(session):
"""Same as find_iso_sr except raises a NotFound exception if SR cannot be
determined
"""
sr_ref = find_iso_sr(session)
if sr_ref is None:
raise exception.NotFound(_('Cannot find SR of content-type ISO'))
return sr_ref
def find_iso_sr(session):
"""Return the storage repository to hold ISO images"""
host = session.get_xenapi_host()
sr_refs = session.get_xenapi().SR.get_all()
for sr_ref in sr_refs:
sr_rec = session.get_xenapi().SR.get_record(sr_ref)
LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
if not sr_rec['content_type'] == 'iso':
LOG.debug(_("ISO: not iso content"))
continue
if not 'i18n-key' in sr_rec['other_config']:
LOG.debug(_("ISO: iso content_type, no 'i18n-key' key"))
continue
if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
LOG.debug(_("ISO: iso content_type, i18n-key value not "
"'local-storage-iso'"))
continue
LOG.debug(_("ISO: SR MATCHing our criteria"))
for pbd_ref in sr_rec['PBDs']:
LOG.debug(_("ISO: ISO, looking to see if it is host local"))
pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref)
pbd_rec_host = pbd_rec['host']
LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, have %(host)s") %
locals())
if pbd_rec_host == host:
LOG.debug(_("ISO: SR with local PBD"))
return sr_ref
return None
def remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device

@ -186,7 +186,7 @@ class VMOps(object):
instance.project_id, ImageType.KERNEL)[0]
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(context, self._session,
instance.id, instance.kernel_id, instance.user_id,
instance.id, instance.ramdisk_id, instance.user_id,
instance.project_id, ImageType.RAMDISK)[0]
# Create the VM ref and attach the first disk
first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
@ -235,21 +235,9 @@ class VMOps(object):
raise vm_create_error
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=first_vdi_ref, userdevice=0, bootable=True)
# Attach any other disks
# userdevice 1 is reserved for rescue
userdevice = 2
for vdi in vdis[1:]:
# vdi['vdi_type'] is either 'os' or 'swap', but we don't
# really care what it is right here.
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
vdi['vdi_uuid'])
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=vdi_ref, userdevice=userdevice,
bootable=False)
userdevice += 1
# Add disks to VM
self._attach_disks(instance, disk_image_type, vm_ref, first_vdi_ref,
vdis)
# Alter the image before VM start for, e.g. network injection
if FLAGS.flat_injected:
@ -260,6 +248,48 @@ class VMOps(object):
self.inject_network_info(instance, network_info, vm_ref)
return vm_ref
def _attach_disks(self, instance, disk_image_type, vm_ref, first_vdi_ref,
vdis):
# device 0 reserved for RW disk
userdevice = 0
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
if disk_image_type == ImageType.DISK_ISO:
LOG.debug("detected ISO image type, going to create blank VM for "
"install")
cd_vdi_ref = first_vdi_ref
first_vdi_ref = VMHelper.fetch_blank_disk(session=self._session,
instance_type_id=instance.instance_type_id)
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=first_vdi_ref, userdevice=userdevice, bootable=False)
# device 1 reserved for rescue disk and we've used '0'
userdevice = 2
VMHelper.create_cd_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=cd_vdi_ref, userdevice=userdevice, bootable=True)
# set user device to next free value
userdevice += 1
else:
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=first_vdi_ref, userdevice=userdevice, bootable=True)
# set user device to next free value
# userdevice 1 is reserved for rescue and we've used '0'
userdevice = 2
# Attach any other disks
for vdi in vdis[1:]:
# vdi['vdi_type'] is either 'os' or 'swap', but we don't
# really care what it is right here.
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
vdi['vdi_uuid'])
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=vdi_ref, userdevice=userdevice,
bootable=False)
userdevice += 1
def _spawn(self, instance, vm_ref):
"""Spawn a new instance."""
LOG.debug(_('Starting VM %s...'), vm_ref)