diff --git a/Authors b/Authors index 6741c81ff..26e1281f4 100644 --- a/Authors +++ b/Authors @@ -17,6 +17,7 @@ Christian Berendt Chuck Short Cory Wright Dan Prince +Dave Walker David Pravec Dean Troyer Devin Carlen @@ -29,6 +30,7 @@ Gabe Westmaas Hisaharu Ishii Hisaki Ohara Ilya Alekseyev +Isaku Yamahata Jason Koelker Jay Pipes Jesse Andrews @@ -65,6 +67,7 @@ Nachi Ueno Naveed Massjouni Nirmal Ranganathan Paul Voccio +Renuka Apte Ricardo Carrillo Cruz Rick Clark Rick Harris diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index f42dfd6b5..5926b97de 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -108,6 +108,13 @@ def main(): interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface) if int(os.environ.get('TESTING', '0')): from nova.tests import fake_flags + + #if FLAGS.fake_rabbit: + # LOG.debug(_("leasing ip")) + # network_manager = utils.import_object(FLAGS.network_manager) + ## reload(fake_flags) + # from nova.tests import fake_flags + action = argv[1] if action in ['add', 'del', 'old']: mac = argv[2] diff --git a/bin/nova-manage b/bin/nova-manage index e3ed7b9d0..26c0d776c 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -97,7 +97,7 @@ flags.DECLARE('vlan_start', 'nova.network.manager') flags.DECLARE('vpn_start', 'nova.network.manager') flags.DECLARE('fixed_range_v6', 'nova.network.manager') flags.DECLARE('images_path', 'nova.image.local') -flags.DECLARE('libvirt_type', 'nova.virt.libvirt_conn') +flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection') flags.DEFINE_flag(flags.HelpFlag()) flags.DEFINE_flag(flags.HelpshortFlag()) flags.DEFINE_flag(flags.HelpXMLFlag()) @@ -417,12 +417,16 @@ class ProjectCommands(object): arguments: project_id [key] [value]""" ctxt = context.get_admin_context() if key: + if value.lower() == 'unlimited': + value = None try: db.quota_update(ctxt, project_id, key, value) except exception.ProjectQuotaNotFound: db.quota_create(ctxt, project_id, key, value) - project_quota = quota.get_quota(ctxt, project_id) + project_quota = quota.get_project_quotas(ctxt, project_id) for key, value in project_quota.iteritems(): + if value is None: + value = 'unlimited' print '%s: %s' % (key, value) def remove(self, project_id, user_id): diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst index 9613ba990..0a5a7a4d6 100644 --- a/doc/source/devref/index.rst +++ b/doc/source/devref/index.rst @@ -35,6 +35,7 @@ Programming Concepts .. toctree:: :maxdepth: 3 + zone rabbit API Reference diff --git a/doc/source/devref/zone.rst b/doc/source/devref/zone.rst index 3dd9d37d3..263560ee2 100644 --- a/doc/source/devref/zone.rst +++ b/doc/source/devref/zone.rst @@ -17,7 +17,7 @@ Zones ===== -A Nova deployment is called a Zone. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution. +A Nova deployment is called a Zone. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers. 
The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion. @@ -34,7 +34,7 @@ Routing between Zones is based on the Capabilities of that Zone. Capabilities ar key=value;value;value, key=value;value;value -Zones have Capabilities which are general to the Zone and are set via `--zone-capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag. +Zones have Capabilities which are general to the Zone and are set via the `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag. Flow within a Zone ------------------ @@ -47,7 +47,7 @@ Inter-service communication within a Zone is done with RabbitMQ. Each class of S These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing. -The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone-name` flag (and defaults to "nova"). +The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova"). Zone administrative functions ----------------------------- diff --git a/doc/source/man/novamanage.rst b/doc/source/man/novamanage.rst index 9c54f3608..397cc8e80 100644 --- a/doc/source/man/novamanage.rst +++ b/doc/source/man/novamanage.rst @@ -6,7 +6,7 @@ nova-manage control and manage cloud computer instances and images ------------------------------------------------------ -:Author: nova@lists.launchpad.net +:Author: openstack@lists.launchpad.net :Date: 2010-11-16 :Copyright: OpenStack LLC :Version: 0.1 @@ -121,7 +121,7 @@ Nova Role nova-manage role <action> [<argument>] ``nova-manage role add <username> <rolename> <(optional) projectname>`` - Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. + Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: cloudadmin, itsec, sysadmin, netadmin, developer. 
If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. ``nova-manage role has <username> <rolename>`` Checks the user or project and responds with True if the user has a global role with a particular project. diff --git a/doc/source/runnova/managing.users.rst b/doc/source/runnova/managing.users.rst index 392142e86..d3442bed9 100644 --- a/doc/source/runnova/managing.users.rst +++ b/doc/source/runnova/managing.users.rst @@ -38,11 +38,11 @@ Role-based access control (RBAC) is an approach to restricting system access to Nova’s rights management system employs the RBAC model and currently supports the following five roles: -* **Cloud Administrator.** (admin) Users of this class enjoy complete system access. +* **Cloud Administrator.** (cloudadmin) Users of this class enjoy complete system access. * **IT Security.** (itsec) This role is limited to IT security personnel. It permits role holders to quarantine instances. -* **Project Manager.** (projectmanager)The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances. +* **System Administrator.** (sysadmin) The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances. * **Network Administrator.** (netadmin) Users with this role are permitted to allocate and assign publicly accessible IP addresses as well as create and modify firewall rules. -* **Developer.** This is a general purpose role that is assigned to users by default. +* **Developer.** (developer) This is a general purpose role that is assigned to users by default. RBAC management is exposed through the dashboard for simplified user management. 
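The Capability flow described in doc/source/devref/zone.rst above is easiest to see in miniature. The sketch below is illustrative only, not code from this branch: ZoneCapabilityCache is a hypothetical stand-in for nova's ZoneManager, and the (min, max) folding rule is an assumption about how per-service values could be aggregated for scheduling decisions.

# Illustrative sketch (not from this branch) of the Capability flow
# described in zone.rst. ZoneCapabilityCache is a hypothetical stand-in
# for nova's ZoneManager; the (min, max) folding rule is an assumption.


class ZoneCapabilityCache(object):
    def __init__(self):
        # host -> service_name -> {capability: value}
        self.service_states = {}

    def update_service_capabilities(self, service_name, host, capabilities):
        """Mirrors the periodic updates services send to the Scheduler."""
        self.service_states.setdefault(host, {})[service_name] = capabilities

    def get_zone_capabilities(self):
        """Fold per-service values into zone-wide (min, max) ranges."""
        combined = {}
        for services in self.service_states.itervalues():
            for service_name, caps in services.iteritems():
                for cap, value in caps.iteritems():
                    key = '%s_%s' % (service_name, cap)
                    lo, hi = combined.get(key, (value, value))
                    combined[key] = (min(lo, value), max(hi, value))
        return combined


cache = ZoneCapabilityCache()
cache.update_service_capabilities('compute', 'host1', {'free_ram_mb': 2048})
cache.update_service_capabilities('compute', 'host2', {'free_ram_mb': 8192})
print cache.get_zone_capabilities()  # {'compute_free_ram_mb': (2048, 8192)}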
diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index cda2ecc28..8170fcafe 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,4 @@ -NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null) +NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE})) export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" diff --git a/nova/exception.py b/nova/exception.py index 56c20d111..02c65fd64 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -271,6 +271,14 @@ class VolumeNotFoundForInstance(VolumeNotFound): message = _("Volume not found for instance %(instance_id)s.") +class SnapshotNotFound(NotFound): + message = _("Snapshot %(snapshot_id)s could not be found.") + + +class VolumeIsBusy(Error): + message = _("deleting volume %(volume_name)s that has snapshot") + + class ExportDeviceNotFoundForVolume(NotFound): message = _("No export device found for volume %(volume_id)s.") diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a7dee8caf..e7e9dab77 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -31,6 +31,7 @@ LOG = logging.getLogger("nova.fakerabbit") EXCHANGES = {} QUEUES = {} +CONSUMERS = {} class Message(base.BaseMessage): @@ -96,17 +97,29 @@ class Backend(base.BaseBackend): ' key %(routing_key)s') % locals()) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) - def declare_consumer(self, queue, callback, *args, **kwargs): - self.current_queue = queue - self.current_callback = callback + def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs): + global CONSUMERS + LOG.debug("Adding consumer %s", consumer_tag) + CONSUMERS[consumer_tag] = (queue, callback) + + def cancel(self, consumer_tag): + global CONSUMERS + LOG.debug("Removing consumer %s", consumer_tag) + del CONSUMERS[consumer_tag] def consume(self, limit=None): + global CONSUMERS + num = 0 while True: - item = self.get(self.current_queue) - if item: - self.current_callback(item) - raise StopIteration() - greenthread.sleep(0) + for (queue, callback) in CONSUMERS.itervalues(): + item = self.get(queue) + if item: + callback(item) + num += 1 + yield + if limit and num == limit: + raise StopIteration() + greenthread.sleep(0.1) def get(self, queue, no_ack=False): global QUEUES @@ -134,5 +147,7 @@ class Backend(base.BaseBackend): def reset_all(): global EXCHANGES global QUEUES + global CONSUMERS EXCHANGES = {} QUEUES = {} + CONSUMERS = {} diff --git a/nova/flags.py b/nova/flags.py index 32cb6efa8..9eaac5596 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues): return name in self.__dict__['__dirty'] def ClearDirty(self): - self.__dict__['__is_dirty'] = [] + self.__dict__['__dirty'] = [] def WasAlreadyParsed(self): return self.__dict__['__was_already_parsed'] @@ -119,11 +119,12 @@ class FlagValues(gflags.FlagValues): if '__stored_argv' not in self.__dict__: return new_flags = FlagValues(self) - for k in self.__dict__['__dirty']: + for k in self.FlagDict().iterkeys(): new_flags[k] = gflags.FlagValues.__getitem__(self, k) + new_flags.Reset() new_flags(self.__dict__['__stored_argv']) - for k in self.__dict__['__dirty']: + for k in new_flags.FlagDict().iterkeys(): setattr(self, k, getattr(new_flags, k)) self.ClearDirty() diff --git a/nova/quota.py b/nova/quota.py index a93cd0766..58766e846 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -28,6 +28,8 @@ flags.DEFINE_integer('quota_instances', 10, 'number of instances allowed per project') 
flags.DEFINE_integer('quota_cores', 20, 'number of instance cores allowed per project') +flags.DEFINE_integer('quota_ram', 50 * 1024, + 'megabytes of instance ram allowed per project') flags.DEFINE_integer('quota_volumes', 10, 'number of volumes allowed per project') flags.DEFINE_integer('quota_gigabytes', 1000, @@ -44,14 +46,28 @@ flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255, 'number of bytes allowed per injected file path') -def get_quota(context, project_id): - rval = {'instances': FLAGS.quota_instances, - 'cores': FLAGS.quota_cores, - 'volumes': FLAGS.quota_volumes, - 'gigabytes': FLAGS.quota_gigabytes, - 'floating_ips': FLAGS.quota_floating_ips, - 'metadata_items': FLAGS.quota_metadata_items} +def _get_default_quotas(): + defaults = { + 'instances': FLAGS.quota_instances, + 'cores': FLAGS.quota_cores, + 'ram': FLAGS.quota_ram, + 'volumes': FLAGS.quota_volumes, + 'gigabytes': FLAGS.quota_gigabytes, + 'floating_ips': FLAGS.quota_floating_ips, + 'metadata_items': FLAGS.quota_metadata_items, + 'injected_files': FLAGS.quota_max_injected_files, + 'injected_file_content_bytes': + FLAGS.quota_max_injected_file_content_bytes, + } + # -1 in the quota flags means unlimited + for key in defaults.keys(): + if defaults[key] == -1: + defaults[key] = None + return defaults + +def get_project_quotas(context, project_id): + rval = _get_default_quotas() quota = db.quota_get_all_by_project(context, project_id) for key in rval.keys(): if key in quota: @@ -65,71 +81,81 @@ def _get_request_allotment(requested, used, quota): return quota - used -def allowed_instances(context, num_instances, instance_type): - """Check quota and return min(num_instances, allowed_instances).""" +def allowed_instances(context, requested_instances, instance_type): + """Check quota and return min(requested_instances, allowed_instances).""" project_id = context.project_id context = context.elevated() - num_cores = num_instances * instance_type['vcpus'] - used_instances, used_cores = db.instance_data_get_for_project(context, - project_id) - quota = get_quota(context, project_id) - allowed_instances = _get_request_allotment(num_instances, used_instances, + requested_cores = requested_instances * instance_type['vcpus'] + requested_ram = requested_instances * instance_type['memory_mb'] + usage = db.instance_data_get_for_project(context, project_id) + used_instances, used_cores, used_ram = usage + quota = get_project_quotas(context, project_id) + allowed_instances = _get_request_allotment(requested_instances, + used_instances, quota['instances']) - allowed_cores = _get_request_allotment(num_cores, used_cores, + allowed_cores = _get_request_allotment(requested_cores, used_cores, quota['cores']) + allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram']) allowed_instances = min(allowed_instances, - int(allowed_cores // instance_type['vcpus'])) - return min(num_instances, allowed_instances) + allowed_cores // instance_type['vcpus'], + allowed_ram // instance_type['memory_mb']) + return min(requested_instances, allowed_instances) -def allowed_volumes(context, num_volumes, size): - """Check quota and return min(num_volumes, allowed_volumes).""" +def allowed_volumes(context, requested_volumes, size): + """Check quota and return min(requested_volumes, allowed_volumes).""" project_id = context.project_id context = context.elevated() size = int(size) - num_gigabytes = num_volumes * size + requested_gigabytes = requested_volumes * size used_volumes, used_gigabytes = db.volume_data_get_for_project(context, 
project_id) - quota = get_quota(context, project_id) - allowed_volumes = _get_request_allotment(num_volumes, used_volumes, + quota = get_project_quotas(context, project_id) + allowed_volumes = _get_request_allotment(requested_volumes, used_volumes, quota['volumes']) - allowed_gigabytes = _get_request_allotment(num_gigabytes, used_gigabytes, + allowed_gigabytes = _get_request_allotment(requested_gigabytes, + used_gigabytes, quota['gigabytes']) allowed_volumes = min(allowed_volumes, int(allowed_gigabytes // size)) - return min(num_volumes, allowed_volumes) + return min(requested_volumes, allowed_volumes) -def allowed_floating_ips(context, num_floating_ips): - """Check quota and return min(num_floating_ips, allowed_floating_ips).""" +def allowed_floating_ips(context, requested_floating_ips): + """Check quota and return min(requested, allowed) floating ips.""" project_id = context.project_id context = context.elevated() used_floating_ips = db.floating_ip_count_by_project(context, project_id) - quota = get_quota(context, project_id) - allowed_floating_ips = _get_request_allotment(num_floating_ips, + quota = get_project_quotas(context, project_id) + allowed_floating_ips = _get_request_allotment(requested_floating_ips, used_floating_ips, quota['floating_ips']) - return min(num_floating_ips, allowed_floating_ips) + return min(requested_floating_ips, allowed_floating_ips) -def allowed_metadata_items(context, num_metadata_items): - """Check quota; return min(num_metadata_items,allowed_metadata_items).""" - project_id = context.project_id - context = context.elevated() - quota = get_quota(context, project_id) - allowed_metadata_items = _get_request_allotment(num_metadata_items, 0, - quota['metadata_items']) - return min(num_metadata_items, allowed_metadata_items) +def _calculate_simple_quota(context, resource, requested): + """Check quota for resource; return min(requested, allowed).""" + quota = get_project_quotas(context, context.project_id) + allowed = _get_request_allotment(requested, 0, quota[resource]) + return min(requested, allowed) -def allowed_injected_files(context): +def allowed_metadata_items(context, requested_metadata_items): + """Return the number of metadata items allowed.""" + return _calculate_simple_quota(context, 'metadata_items', + requested_metadata_items) + + +def allowed_injected_files(context, requested_injected_files): """Return the number of injected files allowed.""" - return FLAGS.quota_max_injected_files + return _calculate_simple_quota(context, 'injected_files', + requested_injected_files) -def allowed_injected_file_content_bytes(context): +def allowed_injected_file_content_bytes(context, requested_bytes): """Return the number of bytes allowed per injected file content.""" - return FLAGS.quota_max_injected_file_content_bytes + resource = 'injected_file_content_bytes' + return _calculate_simple_quota(context, resource, requested_bytes) def allowed_injected_file_path_bytes(context): diff --git a/nova/rpc.py b/nova/rpc.py index 2116f22c3..c5277c6a9 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -28,12 +28,15 @@ import json import sys import time import traceback +import types import uuid from carrot import connection as carrot_connection from carrot import messaging from eventlet import greenpool -from eventlet import greenthread +from eventlet import pools +from eventlet import queue +import greenlet from nova import context from nova import exception @@ -47,7 +50,10 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 
1024, 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -90,6 +96,22 @@ class Connection(carrot_connection.BrokerConnection): return cls.instance() +class Pool(pools.Pool): + """Class that implements a Pool of Connections.""" + + # TODO(comstud): Timeout connections not used in a while + def create(self): + LOG.debug('Creating new connection') + return Connection.instance(new=True) + +# Create a ConnectionPool to use for RPC calls. We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) + + class Consumer(messaging.Consumer): """Consumer base class. @@ -131,7 +153,9 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch(no_ack, + auto_ack, + enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -159,13 +183,13 @@ class AdapterConsumer(Consumer): self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) super(AdapterConsumer, self).__init__(connection=connection, topic=topic) + self.register_callback(self.process_data) - def receive(self, *args, **kwargs): - self.pool.spawn_n(self._receive, *args, **kwargs) + def process_data(self, message_data, message): + """Consumer callback to call a method on a proxy object. - @exception.wrap_exception - def _receive(self, message_data, message): - """Magically looks for a method on the proxy object and calls it. + Parses the message for validity and fires off a thread to call the + proxy object method. Message data should be a dictionary with two keys: method: string representing the method to call @@ -175,8 +199,8 @@ """ LOG.debug(_('received %s') % message_data) - msg_id = message_data.pop('_msg_id', None) - + # This will be popped off in _unpack_context + msg_id = message_data.get('_msg_id', None) ctxt = _unpack_context(message_data) method = message_data.get('method') @@ -188,8 +212,17 @@ # we just log the message and send an error string # back to the caller LOG.warn(_('no method for message: %s') % message_data) - msg_reply(msg_id, _('No method for message: %s') % message_data) + if msg_id: + msg_reply(msg_id, + _('No method for message: %s') % message_data) return + self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) + + @exception.wrap_exception + def _process_data(self, msg_id, ctxt, method, args): + """Thread that magically looks for a method on the proxy + object and calls it. + """ node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) @@ -197,7 +230,18 @@ try: rval = node_func(context=ctxt, **node_args) if msg_id: - msg_reply(msg_id, rval, None) + # Check if the result was a generator + if isinstance(rval, types.GeneratorType): + for x in rval: + msg_reply(msg_id, x, None) + else: + msg_reply(msg_id, rval, None) + + # This final None tells multicall that it is done. 
+ msg_reply(msg_id, None, None) + elif isinstance(rval, types.GeneratorType): + # NOTE(vish): this iterates through the generator + list(rval) except Exception as e: logging.exception('Exception during message handling') if msg_id: @@ -205,11 +249,6 @@ return -class Publisher(messaging.Publisher): - """Publisher base class.""" - pass - - class TopicAdapterConsumer(AdapterConsumer): """Consumes messages on a specific topic.""" @@ -242,6 +281,58 @@ class FanoutAdapterConsumer(AdapterConsumer): topic=topic, proxy=proxy) +class ConsumerSet(object): + """Groups consumers to listen together on a single connection.""" + + def __init__(self, connection, consumer_list): + self.consumer_list = set(consumer_list) + self.consumer_set = None + self.enabled = True + self.init(connection) + + def init(self, conn): + if not conn: + conn = Connection.instance(new=True) + if self.consumer_set: + self.consumer_set.close() + self.consumer_set = messaging.ConsumerSet(conn) + for consumer in self.consumer_list: + consumer.connection = conn + # consumer.backend is set for us + self.consumer_set.add_consumer(consumer) + + def reconnect(self): + self.init(None) + + def wait(self, limit=None): + running = True + while running: + it = self.consumer_set.iterconsume(limit=limit) + if not it: + break + while True: + try: + it.next() + except StopIteration: + return + except greenlet.GreenletExit: + running = False + break + except Exception as e: + LOG.exception(_("Exception while processing consumer")) + self.reconnect() + # Break to outer loop + break + + def close(self): + self.consumer_set.close() + + +class Publisher(messaging.Publisher): + """Publisher base class.""" + pass + + class TopicPublisher(Publisher): """Publishes messages on a specific topic.""" @@ -306,16 +397,18 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = Connection.instance() - publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply, 'failure': failure}) - except TypeError: - publisher.send( - {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()), - 'failure': failure}) - publisher.close() + + with ConnectionPool.item() as conn: + publisher = DirectPublisher(connection=conn, msg_id=msg_id) + try: + publisher.send({'result': reply, 'failure': failure}) + except TypeError: + publisher.send( + {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure}) + + publisher.close() class RemoteError(exception.Error): @@ -347,8 +440,9 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) LOG.debug(_('unpacked context: %s'), context_dict) - return context.RequestContext.from_dict(context_dict) + return RpcContext.from_dict(context_dict) def _pack_context(msg, context): @@ -360,70 +454,112 @@ def _pack_context(msg, context): for args at some point. 
""" - context = dict([('_context_%s' % key, value) - for (key, value) in context.to_dict().iteritems()]) - msg.update(context) + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) -def call(context, topic, msg): - """Sends a message on a topic and wait for a response.""" +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + msg_reply(self.msg_id, *args, **kwargs) + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" LOG.debug(_('Making asynchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - class WaitMessage(object): - def __call__(self, data, message): - """Acks message and sets result.""" - message.ack() - if data['failure']: - self.result = RemoteError(*data['failure']) - else: - self.result = data['result'] - - wait_msg = WaitMessage() - conn = Connection.instance() - consumer = DirectConsumer(connection=conn, msg_id=msg_id) + con_conn = ConnectionPool.get() + consumer = DirectConsumer(connection=con_conn, msg_id=msg_id) + wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - conn = Connection.instance() - publisher = TopicPublisher(connection=conn, topic=topic) + publisher = TopicPublisher(connection=con_conn, topic=topic) publisher.send(msg) publisher.close() - try: - consumer.wait(limit=1) - except StopIteration: - pass - consumer.close() - # NOTE(termie): this is a little bit of a change from the original - # non-eventlet code where returning a Failure - # instance from a deferred call is very similar to - # raising an exception - if isinstance(wait_msg.result, Exception): - raise wait_msg.result - return wait_msg.result + return wait_msg + + +class MulticallWaiter(object): + def __init__(self, consumer): + self._consumer = consumer + self._results = queue.Queue() + self._closed = False + + def close(self): + self._closed = True + self._consumer.close() + ConnectionPool.put(self._consumer.connection) + + def __call__(self, data, message): + """Acks message and sets result.""" + message.ack() + if data['failure']: + self._results.put(RemoteError(*data['failure'])) + else: + self._results.put(data['result']) + + def __iter__(self): + return self.wait() + + def wait(self): + while True: + rv = None + while rv is None and not self._closed: + try: + rv = self._consumer.fetch(enable_callbacks=True) + except Exception: + self.close() + raise + time.sleep(0.01) + + result = self._results.get() + if isinstance(result, Exception): + self.close() + raise result + if result == None: + self.close() + raise StopIteration + yield result + + +def call(context, topic, msg): + """Sends a message on a topic and wait for a response.""" + rv = multicall(context, topic, msg) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = Connection.instance() - publisher = TopicPublisher(connection=conn, topic=topic) - publisher.send(msg) - publisher.close() + with ConnectionPool.item() as conn: + publisher = TopicPublisher(connection=conn, topic=topic) + 
publisher.send(msg) + publisher.close() def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = Connection.instance() - publisher = FanoutPublisher(topic, connection=conn) - publisher.send(msg) - publisher.close() + with ConnectionPool.item() as conn: + publisher = FanoutPublisher(topic, connection=conn) + publisher.send(msg) + publisher.close() def generic_response(message_data, message): @@ -459,6 +595,7 @@ def send_message(topic, message, wait=True): if wait: consumer.wait() + consumer.close() if __name__ == '__main__': diff --git a/nova/service.py b/nova/service.py index ab1238c3b..74f9f04d8 100644 --- a/nova/service.py +++ b/nova/service.py @@ -19,14 +19,11 @@ """Generic Node baseclass for all workers that run on hosts.""" +import greenlet import inspect import os -import sys -import time -from eventlet import event from eventlet import greenthread -from eventlet import greenpool from nova import context from nova import db @@ -91,27 +88,37 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - conn1 = rpc.Connection.instance(new=True) - conn2 = rpc.Connection.instance(new=True) - conn3 = rpc.Connection.instance(new=True) + self.conn = rpc.Connection.instance(new=True) + logging.debug("Creating Consumer connection for Service %s" % + self.topic) + + # Share this same connection for these Consumers + consumer_all = rpc.TopicAdapterConsumer( + connection=self.conn, + topic=self.topic, + proxy=self) + consumer_node = rpc.TopicAdapterConsumer( + connection=self.conn, + topic='%s.%s' % (self.topic, self.host), + proxy=self) + fanout = rpc.FanoutAdapterConsumer( + connection=self.conn, + topic=self.topic, + proxy=self) + consumer_set = rpc.ConsumerSet( + connection=self.conn, + consumer_list=[consumer_all, consumer_node, fanout]) + + # Wait forever, processing these consumers + def _wait(): + try: + consumer_set.wait() + finally: + consumer_set.close() + + self.consumer_set_thread = greenthread.spawn(_wait) + if self.report_interval: - consumer_all = rpc.TopicAdapterConsumer( - connection=conn1, - topic=self.topic, - proxy=self) - consumer_node = rpc.TopicAdapterConsumer( - connection=conn2, - topic='%s.%s' % (self.topic, self.host), - proxy=self) - fanout = rpc.FanoutAdapterConsumer( - connection=conn3, - topic=self.topic, - proxy=self) - - self.timers.append(consumer_all.attach_to_eventlet()) - self.timers.append(consumer_node.attach_to_eventlet()) - self.timers.append(fanout.attach_to_eventlet()) - pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) self.timers.append(pulse) @@ -174,6 +181,11 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): + self.consumer_set_thread.kill() + try: + self.consumer_set_thread.wait() + except greenlet.GreenletExit: + pass for x in self.timers: try: x.stop() diff --git a/nova/test.py b/nova/test.py index 4deb2a175..80b2d0a74 100644 --- a/nova/test.py +++ b/nova/test.py @@ -31,17 +31,15 @@ import uuid import unittest import mox -import shutil import stubout from eventlet import greenthread -from nova import context -from nova import db from nova import fakerabbit from nova import flags from nova import rpc from nova import service from nova import wsgi +from nova.virt import fake FLAGS = flags.FLAGS @@ -85,6 +83,7 @@ class TestCase(unittest.TestCase): 
self._monkey_patch_attach() self._monkey_patch_wsgi() self._original_flags = FLAGS.FlagValuesDict() + rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size) def tearDown(self): """Runs after each test method to tear down test environment.""" @@ -99,6 +98,10 @@ class TestCase(unittest.TestCase): if FLAGS.fake_rabbit: fakerabbit.reset_all() + if FLAGS.connection_type == 'fake': + if hasattr(fake.FakeConnection, '_instance'): + del fake.FakeConnection._instance + # Reset any overriden flags self.reset_flags() diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 5d7ca98b5..ecefc464a 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -21,24 +21,24 @@ from nova import flags FLAGS = flags.FLAGS flags.DECLARE('volume_driver', 'nova.volume.manager') -FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver' -FLAGS.connection_type = 'fake' -FLAGS.fake_rabbit = True +FLAGS['volume_driver'].SetDefault('nova.volume.driver.FakeISCSIDriver') +FLAGS['connection_type'].SetDefault('fake') +FLAGS['fake_rabbit'].SetDefault(True) flags.DECLARE('auth_driver', 'nova.auth.manager') -FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver' +FLAGS['auth_driver'].SetDefault('nova.auth.dbdriver.DbDriver') flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('fake_network', 'nova.network.manager') -FLAGS.network_size = 8 -FLAGS.num_networks = 2 -FLAGS.fake_network = True -FLAGS.image_service = 'nova.image.local.LocalImageService' +FLAGS['network_size'].SetDefault(8) +FLAGS['num_networks'].SetDefault(2) +FLAGS['fake_network'].SetDefault(True) +FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService') flags.DECLARE('num_shelves', 'nova.volume.driver') flags.DECLARE('blades_per_shelf', 'nova.volume.driver') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') -FLAGS.num_shelves = 2 -FLAGS.blades_per_shelf = 4 -FLAGS.iscsi_num_targets = 8 -FLAGS.verbose = True -FLAGS.sqlite_db = "tests.sqlite" -FLAGS.use_ipv6 = True +FLAGS['num_shelves'].SetDefault(2) +FLAGS['blades_per_shelf'].SetDefault(4) +FLAGS['iscsi_num_targets'].SetDefault(8) +FLAGS['verbose'].SetDefault(True) +FLAGS['sqlite_db'].SetDefault("tests.sqlite") +FLAGS['use_ipv6'].SetDefault(True) diff --git a/nova/tests/real_flags.py b/nova/tests/real_flags.py deleted file mode 100644 index 71da04992..000000000 --- a/nova/tests/real_flags.py +++ /dev/null @@ -1,26 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova import flags - -FLAGS = flags.FLAGS - -FLAGS.connection_type = 'libvirt' -FLAGS.fake_rabbit = False -FLAGS.fake_network = False -FLAGS.verbose = False diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 97f401b87..7c0331eff 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -224,6 +224,29 @@ class ApiEc2TestCase(test.TestCase): self.manager.delete_project(project) self.manager.delete_user(user) + def test_create_duplicate_key_pair(self): + """Test that, after successfully generating a keypair, + requesting a second keypair with the same name fails sanely""" + self.expect_http() + self.mox.ReplayAll() + keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ + for x in range(random.randint(4, 8))) + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + # NOTE(vish): create depends on pool, so call helper directly + self.ec2.create_key_pair('test') + + try: + self.ec2.create_key_pair('test') + except EC2ResponseError, e: + if e.code == 'KeyPairExists': + pass + else: + self.fail("Unexpected EC2ResponseError: %s " + "(expected KeyPairExists)" % e.code) + else: + self.fail('Exception not raised.') + def test_get_all_security_groups(self): """Test that we can retrieve security groups""" self.expect_http() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454de..34a73ad1f 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -17,13 +17,9 @@ # under the License. from base64 import b64decode -import json from M2Crypto import BIO from M2Crypto import RSA import os -import shutil -import tempfile -import time from eventlet import greenthread @@ -33,12 +29,10 @@ from nova import db from nova import flags from nova import log as logging from nova import rpc -from nova import service from nova import test from nova import utils from nova import exception from nova.auth import manager -from nova.compute import power_state from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.image import local @@ -79,14 +73,21 @@ class CloudTestCase(test.TestCase): self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish + rpc_cast = rpc.cast + + def finish_cast(*args, **kwargs): + rpc_cast(*args, **kwargs) + greenthread.sleep(0.2) + + self.stubs.Set(rpc, 'cast', finish_cast) + def tearDown(self): network_ref = db.project_get_network(self.context, self.project.id) db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) - self.compute.kill() - self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): @@ -113,7 +114,6 @@ class CloudTestCase(test.TestCase): self.cloud.describe_addresses(self.context) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) db.floating_ip_destroy(self.context, address) def test_associate_disassociate_address(self): @@ -129,12 +129,10 @@ class CloudTestCase(test.TestCase): self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) - greenthread.sleep(0.3) self.cloud.disassociate_address(self.context, public_ip=address) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) self.network.deallocate_fixed_ip(self.context, fixed) db.instance_destroy(self.context, inst['id']) 
db.floating_ip_destroy(self.context, address) @@ -188,6 +186,52 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, service1['id']) db.service_destroy(self.context, service2['id']) + def test_describe_snapshots(self): + """Makes sure describe_snapshots works and filters results.""" + vol = db.volume_create(self.context, {}) + snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']}) + snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']}) + result = self.cloud.describe_snapshots(self.context) + self.assertEqual(len(result['snapshotSet']), 2) + snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x') + result = self.cloud.describe_snapshots(self.context, + snapshot_id=[snapshot_id]) + self.assertEqual(len(result['snapshotSet']), 1) + self.assertEqual( + ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']), + snap2['id']) + db.snapshot_destroy(self.context, snap1['id']) + db.snapshot_destroy(self.context, snap2['id']) + db.volume_destroy(self.context, vol['id']) + + def test_create_snapshot(self): + """Makes sure create_snapshot works.""" + vol = db.volume_create(self.context, {'status': "available"}) + volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x') + + result = self.cloud.create_snapshot(self.context, + volume_id=volume_id) + snapshot_id = result['snapshotId'] + result = self.cloud.describe_snapshots(self.context) + self.assertEqual(len(result['snapshotSet']), 1) + self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id) + + db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id)) + db.volume_destroy(self.context, vol['id']) + + def test_delete_snapshot(self): + """Makes sure delete_snapshot works.""" + vol = db.volume_create(self.context, {'status': "available"}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + + result = self.cloud.delete_snapshot(self.context, + snapshot_id=snapshot_id) + self.assertTrue(result) + + db.volume_destroy(self.context, vol['id']) + def test_describe_instances(self): """Makes sure describe_instances works and filters results.""" inst1 = db.instance_create(self.context, {'reservation_id': 'a', @@ -306,31 +350,25 @@ class CloudTestCase(test.TestCase): 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) - greenthread.sleep(0.3) instance_id = rv['instancesSet'][0]['instanceId'] output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. - greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_ajax_console(self): kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] - greenthread.sleep(0.3) output = self.cloud.get_ajax_console(context=self.context, instance_id=[instance_id]) self.assertEquals(output['url'], '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url) # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. 
- greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_key_generation(self): result = self._create_key('test') diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py index 707300fcf..05319d91f 100644 --- a/nova/tests/test_flags.py +++ b/nova/tests/test_flags.py @@ -91,6 +91,20 @@ class FlagsTestCase(test.TestCase): self.assert_('runtime_answer' in self.global_FLAGS) self.assertEqual(self.global_FLAGS.runtime_answer, 60) + def test_long_vs_short_flags(self): + flags.DEFINE_string('duplicate_answer_long', 'val', 'desc', + flag_values=self.global_FLAGS) + argv = ['flags_test', '--duplicate_answer=60', 'extra_arg'] + args = self.global_FLAGS(argv) + + self.assert_('duplicate_answer' not in self.global_FLAGS) + self.assert_(self.global_FLAGS.duplicate_answer_long, 60) + + flags.DEFINE_integer('duplicate_answer', 60, 'desc', + flag_values=self.global_FLAGS) + self.assertEqual(self.global_FLAGS.duplicate_answer, 60) + self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val') + def test_flag_leak_left(self): self.assertEqual(FLAGS.flags_unittest, 'foo') FLAGS.flags_unittest = 'bar' diff --git a/nova/tests/test_virt.py b/nova/tests/test_libvirt.py similarity index 96% rename from nova/tests/test_virt.py rename to nova/tests/test_libvirt.py index 1bec9caca..4efdd6ae9 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_libvirt.py @@ -32,7 +32,8 @@ from nova import utils from nova.api.ec2 import cloud from nova.auth import manager from nova.compute import power_state -from nova.virt import libvirt_conn +from nova.virt.libvirt import connection +from nova.virt.libvirt import firewall libvirt = None FLAGS = flags.FLAGS @@ -83,7 +84,7 @@ class CacheConcurrencyTestCase(test.TestCase): def test_same_fname_concurrency(self): """Ensures that the same fname cache runs at a sequentially""" - conn = libvirt_conn.LibvirtConnection + conn = connection.LibvirtConnection wait1 = eventlet.event.Event() done1 = eventlet.event.Event() eventlet.spawn(conn._cache_image, _concurrency, @@ -104,7 +105,7 @@ class CacheConcurrencyTestCase(test.TestCase): def test_different_fname_concurrency(self): """Ensures that two different fname caches are concurrent""" - conn = libvirt_conn.LibvirtConnection + conn = connection.LibvirtConnection wait1 = eventlet.event.Event() done1 = eventlet.event.Event() eventlet.spawn(conn._cache_image, _concurrency, @@ -125,7 +126,7 @@ class CacheConcurrencyTestCase(test.TestCase): class LibvirtConnTestCase(test.TestCase): def setUp(self): super(LibvirtConnTestCase, self).setUp() - libvirt_conn._late_load_cheetah() + connection._late_load_cheetah() self.flags(fake_call=True) self.manager = manager.AuthManager() @@ -171,8 +172,8 @@ class LibvirtConnTestCase(test.TestCase): return False global libvirt libvirt = __import__('libvirt') - libvirt_conn.libvirt = __import__('libvirt') - libvirt_conn.libxml2 = __import__('libxml2') + connection.libvirt = __import__('libvirt') + connection.libxml2 = __import__('libxml2') return True def create_fake_libvirt_mock(self, **kwargs): @@ -182,7 +183,7 @@ class LibvirtConnTestCase(test.TestCase): class FakeLibvirtConnection(object): pass - # A fake libvirt_conn.IptablesFirewallDriver + # A fake connection.IptablesFirewallDriver class FakeIptablesFirewallDriver(object): def __init__(self, **kwargs): @@ -198,11 +199,11 @@ class LibvirtConnTestCase(test.TestCase): for key, val in kwargs.items(): fake.__setattr__(key, val) - # Inevitable mocks for libvirt_conn.LibvirtConnection - 
self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class') - libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - libvirt_conn.LibvirtConnection._conn = fake + # Inevitable mocks for connection.LibvirtConnection + self.mox.StubOutWithMock(connection.utils, 'import_class') + connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn = fake def create_service(self, **kwargs): service_ref = {'host': kwargs.get('host', 'dummy'), @@ -214,7 +215,7 @@ class LibvirtConnTestCase(test.TestCase): return db.service_create(context.get_admin_context(), service_ref) def test_preparing_xml_info(self): - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, self.test_instance) result = conn._prepare_xml_info(instance_ref, False) @@ -229,7 +230,7 @@ class LibvirtConnTestCase(test.TestCase): self.assertTrue(len(result['nics']) == 2) def test_get_nic_for_xml_v4(self): - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) network, mapping = _create_network_info()[0] self.flags(use_ipv6=False) params = conn._get_nic_for_xml(network, mapping)['extra_params'] @@ -237,7 +238,7 @@ class LibvirtConnTestCase(test.TestCase): self.assertTrue(params.find('PROJMASKV6') == -1) def test_get_nic_for_xml_v6(self): - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) network, mapping = _create_network_info()[0] self.flags(use_ipv6=True) params = conn._get_nic_for_xml(network, mapping)['extra_params'] @@ -282,7 +283,7 @@ class LibvirtConnTestCase(test.TestCase): def test_multi_nic(self): instance_data = dict(self.test_instance) network_info = _create_network_info(2) - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, instance_data) xml = conn.to_xml(instance_ref, False, network_info) tree = xml_to_tree(xml) @@ -313,7 +314,7 @@ class LibvirtConnTestCase(test.TestCase): 'instance_id': instance_ref['id']}) self.flags(libvirt_type='lxc') - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, 'lxc:///') @@ -419,7 +420,7 @@ class LibvirtConnTestCase(test.TestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, expected_uri) @@ -446,7 +447,7 @@ class LibvirtConnTestCase(test.TestCase): FLAGS.libvirt_uri = testuri for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, testuri) db.instance_destroy(user_context, instance_ref['id']) @@ -470,13 +471,13 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock(getVersion=getVersion, getType=getType, listDomainsID=listDomainsID) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, + self.mox.StubOutWithMock(connection.LibvirtConnection, 'get_cpu_info') - libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') + connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') # 
Start test self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.update_available_resource(self.context, 'dummy') service_ref = db.service_get(self.context, service_ref['id']) compute_node = service_ref['compute_node'][0] @@ -510,7 +511,7 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock() self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) self.assertRaises(exception.ComputeServiceUnavailable, conn.update_available_resource, self.context, 'dummy') @@ -545,7 +546,7 @@ class LibvirtConnTestCase(test.TestCase): # Start test self.mox.ReplayAll() try: - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.firewall_driver.setattr('setup_basic_filtering', fake_none) conn.firewall_driver.setattr('prepare_instance_filter', fake_none) conn.firewall_driver.setattr('instance_filter_exists', fake_none) @@ -594,7 +595,7 @@ class LibvirtConnTestCase(test.TestCase): # Start test self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) self.assertRaises(libvirt.libvirtError, conn._live_migration, self.context, instance_ref, 'dest', '', @@ -623,7 +624,7 @@ class LibvirtConnTestCase(test.TestCase): # Start test self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.firewall_driver.setattr('setup_basic_filtering', fake_none) conn.firewall_driver.setattr('prepare_instance_filter', fake_none) @@ -647,7 +648,7 @@ class LibvirtConnTestCase(test.TestCase): self.assertTrue(count) def test_get_host_ip_addr(self): - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) ip = conn.get_host_ip_addr() self.assertEquals(ip, FLAGS.my_ip) @@ -671,7 +672,7 @@ class IptablesFirewallTestCase(test.TestCase): class FakeLibvirtConnection(object): pass self.fake_libvirt_connection = FakeLibvirtConnection() - self.fw = libvirt_conn.IptablesFirewallDriver( + self.fw = firewall.IptablesFirewallDriver( get_connection=lambda: self.fake_libvirt_connection) def tearDown(self): @@ -895,7 +896,7 @@ class NWFilterTestCase(test.TestCase): self.fake_libvirt_connection = Mock() - self.fw = libvirt_conn.NWFilterFirewall( + self.fw = firewall.NWFilterFirewall( lambda: self.fake_libvirt_connection) def tearDown(self): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 44d7c91eb..ffd748efe 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -31,7 +31,6 @@ LOG = logging.getLogger('nova.tests.rpc') class RpcTestCase(test.TestCase): - """Test cases for rpc""" def setUp(self): super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance(True) @@ -43,14 +42,55 @@ class RpcTestCase(test.TestCase): self.context = context.get_admin_context() def test_call_succeed(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo", "args": {"value": value}}) self.assertEqual(value, result) + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def 
test_multicall_succeed_once(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + def test_context_passed(self): - """Makes sure a context is passed through rpc call""" + """Makes sure a context is passed through rpc call.""" value = 42 result = rpc.call(self.context, 'test', {"method": "context", @@ -58,11 +98,12 @@ class RpcTestCase(test.TestCase): self.assertEqual(self.context.to_dict(), result) def test_call_exception(self): - """Test that exception gets passed back properly + """Test that exception gets passed back properly. rpc.call returns a RemoteError object. The value of the exception is converted to a string, so we convert it back to an int in the test. + """ value = 42 self.assertRaises(rpc.RemoteError, @@ -81,7 +122,7 @@ class RpcTestCase(test.TestCase): self.assertEqual(int(exc.value), value) def test_nested_calls(self): - """Test that we can do an rpc.call inside another call""" + """Test that we can do an rpc.call inside another call.""" class Nested(object): @staticmethod def echo(context, queue, value): @@ -108,25 +149,80 @@ class RpcTestCase(test.TestCase): "value": value}}) self.assertEqual(value, result) + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection.""" + conn1 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn1) + conn2 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) + + def test_connectionpool_double(self): + """Test that ConnectionPool returns and reuses separate connections. + + When called consecutively we should get separate connections and upon + returning them those connections should be reused for future calls + before generating a new connection. + + """ + conn1 = rpc.ConnectionPool.get() + conn2 = rpc.ConnectionPool.get() + + self.assertNotEqual(conn1, conn2) + rpc.ConnectionPool.put(conn1) + rpc.ConnectionPool.put(conn2) + + conn3 = rpc.ConnectionPool.get() + conn4 = rpc.ConnectionPool.get() + self.assertEqual(conn1, conn3) + self.assertEqual(conn2, conn4) + + def test_connectionpool_limit(self): + """Test connection pool limit and connection uniqueness.""" + max_size = FLAGS.rpc_conn_pool_size + conns = [] + + for i in xrange(max_size): + conns.append(rpc.ConnectionPool.get()) + + self.assertFalse(rpc.ConnectionPool.free_items) + self.assertEqual(rpc.ConnectionPool.current_size, + rpc.ConnectionPool.max_size) + self.assertEqual(len(set(conns)), max_size) + class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call + """Simple Proxy class so the consumer has methods to call. - Uses static methods because we aren't actually storing any state""" + Uses static methods because we aren't actually storing any state. 
+ + """ @staticmethod def echo(context, value): - """Simply returns whatever value is sent in""" + """Simply returns whatever value is sent in.""" LOG.debug(_("Received %s"), value) return value @staticmethod def context(context, value): - """Returns dictionary version of context""" + """Returns dictionary version of context.""" LOG.debug(_("Received %s"), context) return context.to_dict() + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + @staticmethod def fail(context, value): - """Raises an exception with the value sent in""" + """Raises an exception with the value sent in.""" raise Exception(value) diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 236d12434..3472b1f59 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -176,6 +176,34 @@ class VolumeTestCase(test.TestCase): # This will allow us to test cross-node interactions pass + @staticmethod + def _create_snapshot(volume_id, size='0'): + """Create a snapshot object.""" + snap = {} + snap['volume_size'] = size + snap['user_id'] = 'fake' + snap['project_id'] = 'fake' + snap['volume_id'] = volume_id + snap['status'] = "creating" + return db.snapshot_create(context.get_admin_context(), snap)['id'] + + def test_create_delete_snapshot(self): + """Test snapshot can be created and deleted.""" + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + snapshot_id = self._create_snapshot(volume_id) + self.volume.create_snapshot(self.context, volume_id, snapshot_id) + self.assertEqual(snapshot_id, + db.snapshot_get(context.get_admin_context(), + snapshot_id).id) + + self.volume.delete_snapshot(self.context, snapshot_id) + self.assertRaises(exception.NotFound, + db.snapshot_get, + self.context, + snapshot_id) + self.volume.delete_volume(self.context, volume_id) + class DriverTestCase(test.TestCase): """Base Test class for Drivers.""" diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index be1e35697..9d56c1644 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -395,6 +395,29 @@ class XenAPIVMTestCase(test.TestCase): os_type="linux") self.check_vm_params_for_linux() + def test_spawn_vhd_glance_swapdisk(self): + # Change the default host_call_plugin to one that'll return + # a swap disk + orig_func = stubs.FakeSessionForVMTests.host_call_plugin + + stubs.FakeSessionForVMTests.host_call_plugin = \ + stubs.FakeSessionForVMTests.host_call_plugin_swap + + try: + # We'll steal the above glance linux test + self.test_spawn_vhd_glance_linux() + finally: + # Make sure to put this back + stubs.FakeSessionForVMTests.host_call_plugin = orig_func + + # We should have 2 VBDs. + self.assertEqual(len(self.vm['VBDs']), 2) + # Now test that we have 1. 
+ self.tearDown() + self.setUp() + self.test_spawn_vhd_glance_linux() + self.assertEqual(len(self.vm['VBDs']), 1) + def test_spawn_vhd_glance_windows(self): FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, @@ -569,11 +592,29 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): bob_shared = self.bob.compute_shared(alice_pub) self.assertEquals(alice_shared, bob_shared) - def test_encryption(self): - msg = "This is a top-secret message" - enc = self.alice.encrypt(msg) + def _test_encryption(self, message): + enc = self.alice.encrypt(message) + self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) - self.assertEquals(dec, msg) + self.assertEquals(dec, message) + + def test_encrypt_simple_message(self): + self._test_encryption('This is a simple message.') + + def test_encrypt_message_with_newlines_at_end(self): + self._test_encryption('This message has a newline at the end.\n') + + def test_encrypt_many_newlines_at_end(self): + self._test_encryption('Message with lotsa newlines.\n\n\n') + + def test_encrypt_newlines_inside_message(self): + self._test_encryption('Message\nwith\ninterior\nnewlines.') + + def test_encrypt_with_leading_newlines(self): + self._test_encryption('\n\nMessage with leading newlines.') + + def test_encrypt_really_long_message(self): + self._test_encryption(''.join(['abcd' for i in xrange(1024)])) def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 4833ccb07..35308d95f 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -17,6 +17,7 @@ """Stubouts, mocks and fixtures for the test suite""" import eventlet +import json from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils @@ -37,7 +38,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return vdi_uuid + return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) @@ -132,11 +133,30 @@ class FakeSessionForVMTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVMTests, self).__init__(uri) - def host_call_plugin(self, _1, _2, _3, _4, _5): + def host_call_plugin(self, _1, _2, plugin, method, _5): sr_ref = fake.get_all('SR')[0] vdi_ref = fake.create_vdi('', False, sr_ref, False) vdi_rec = fake.get_record('VDI', vdi_ref) - return '%s' % vdi_rec['uuid'] + if plugin == "glance" and method == "download_vhd": + ret_str = json.dumps([dict(vdi_type='os', + vdi_uuid=vdi_rec['uuid'])]) + else: + ret_str = vdi_rec['uuid'] + return '%s' % ret_str + + def host_call_plugin_swap(self, _1, _2, plugin, method, _5): + sr_ref = fake.get_all('SR')[0] + vdi_ref = fake.create_vdi('', False, sr_ref, False) + vdi_rec = fake.get_record('VDI', vdi_ref) + if plugin == "glance" and method == "download_vhd": + swap_vdi_ref = fake.create_vdi('', False, sr_ref, False) + swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref) + ret_str = json.dumps( + [dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']), + dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])]) + else: + ret_str = vdi_rec['uuid'] + return '%s' % ret_str def VM_start(self, _1, ref, _2, _3): vm = fake.get_record('VM', ref)
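The new rpc.multicall() surface is easiest to understand from the caller's side. The sketch below is a minimal illustration distilled from the nova/tests/test_rpc.py changes above, not part of the branch itself: the Echoer proxy and the 'demo' topic are made up, and it assumes FLAGS.fake_rabbit (or a reachable RabbitMQ) plus the green thread that attach_to_eventlet() spawns. Only multicall(), call(), and the generator-draining behaviour come from this branch.

# Illustrative sketch (not from this branch) of the new multicall surface,
# modeled on TestReceiver in nova/tests/test_rpc.py. The Echoer proxy and
# the 'demo' topic are hypothetical.

from nova import context
from nova import rpc


class Echoer(object):
    """Stand-in proxy object, modeled on TestReceiver in test_rpc.py."""

    @staticmethod
    def echo_three_times_yield(context, value):
        # Generator results are drained by AdapterConsumer._process_data,
        # producing one msg_reply() per yielded item.
        yield value
        yield value + 1
        yield value + 2


conn = rpc.Connection.instance(new=True)
consumer = rpc.TopicAdapterConsumer(connection=conn, topic='demo',
                                    proxy=Echoer())
consumer.attach_to_eventlet()

ctxt = context.get_admin_context()

# multicall() yields each reply as it arrives...
for reply in rpc.multicall(ctxt, 'demo',
                           {'method': 'echo_three_times_yield',
                            'args': {'value': 40}}):
    print reply  # 40, then 41, then 42

# ...while call() blocks and returns only the final reply.
assert rpc.call(ctxt, 'demo', {'method': 'echo_three_times_yield',
                               'args': {'value': 40}}) == 42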