diff --git a/.mailmap b/.mailmap index 95bf1a1bc18a..9c1e08269407 100644 --- a/.mailmap +++ b/.mailmap @@ -1,37 +1,44 @@ # Format is: diff --git a/Authors b/Authors index 168a3cf9abba..723fe88641f0 100644 --- a/Authors +++ b/Authors @@ -4,13 +4,16 @@ Anthony Young Antony Messerli Armando Migliaccio Bilal Akhtar +Brian Lamar +Brian Schott +Brian Waldon Chiradeep Vittal Chmouel Boudjnah Chris Behrens Christian Berendt Cory Wright -David Pravec Dan Prince +David Pravec Dean Troyer Devin Carlen Ed Leafe @@ -41,7 +44,7 @@ Monsyne Dragon Monty Taylor MORITA Kazutaka Muneyuki Noguchi -Nachi Ueno +Nachi Ueno Naveed Massjouni Paul Voccio Ricardo Carrillo Cruz @@ -56,7 +59,8 @@ Soren Hansen Thierry Carrez Todd Willey Trey Morris -Tushar Patil +Tushar Patil +Vasiliy Shlykov Vishvananda Ishaya Youcef Laribi Zhixue Wu diff --git a/HACKING b/HACKING index 3af2381bfb83..e58d60e582a4 100644 --- a/HACKING +++ b/HACKING @@ -47,3 +47,22 @@ Human Alphabetical Order Examples from nova.auth import users from nova.endpoint import api from nova.endpoint import cloud + +Docstrings +---------- + """Summary of the function, class or method, less than 80 characters. + + New paragraph after newline that explains in more detail any general + information about the function, class or method. After this, if defining + parameters and return types use the Sphinx format. After that an extra + newline then close the quotations. + + When writing the docstring for a class, an extra line should be placed + after the closing quotations. For more in-depth explanations for these + decisions see http://www.python.org/dev/peps/pep-0257/ + + :param foo: the foo parameter + :param bar: the bar parameter + :returns: description of the return value + + """ diff --git a/MANIFEST.in b/MANIFEST.in index 3908830d79ef..f0a9cffb3ac6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -6,14 +6,23 @@ graft doc graft smoketests graft tools graft etc +graft bzrplugins +graft contrib +graft po +graft plugins include nova/api/openstack/notes.txt +include nova/auth/*.schema include nova/auth/novarc.template +include nova/auth/opendj.sh include nova/auth/slap.sh include nova/cloudpipe/bootscript.sh include nova/cloudpipe/client.ovpn.template +include nova/cloudpipe/bootscript.template include nova/compute/fakevirtinstance.xml include nova/compute/interfaces.template +include nova/console/xvp.conf.template include nova/db/sqlalchemy/migrate_repo/migrate.cfg +include nova/db/sqlalchemy/migrate_repo/README include nova/virt/interfaces.template include nova/virt/libvirt*.xml.template include nova/tests/CA/ @@ -25,6 +34,7 @@ include nova/tests/bundle/1mb.manifest.xml include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml include nova/tests/bundle/1mb.part.0 include nova/tests/bundle/1mb.part.1 +include nova/tests/db/nova.austin.sqlite include plugins/xenapi/README include plugins/xenapi/etc/xapi.d/plugins/objectstore include plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py diff --git a/bin/nova-manage b/bin/nova-manage index 7835ca551ac5..6d67252b8b81 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -433,6 +433,37 @@ class ProjectCommands(object): "nova-api server on this host.") + +class FixedIpCommands(object): + """Class for managing fixed ip.""" + + def list(self, host=None): + """Lists all fixed ips (optionally by host) arguments: [host]""" + ctxt = context.get_admin_context() + if host == None: + fixed_ips = db.fixed_ip_get_all(ctxt)
+ else: + fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host) + + print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (_('network'), + _('IP address'), + _('MAC address'), + _('hostname'), + _('host')) + for fixed_ip in fixed_ips: + hostname = None + host = None + mac_address = None + if fixed_ip['instance']: + instance = fixed_ip['instance'] + hostname = instance['hostname'] + host = instance['host'] + mac_address = instance['mac_address'] + print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % ( + fixed_ip['network']['cidr'], + fixed_ip['address'], + mac_address, hostname, host) + + class FloatingIpCommands(object): """Class for managing floating ip.""" @@ -472,8 +503,8 @@ class NetworkCommands(object): """Class for managing networks.""" def create(self, fixed_range=None, num_networks=None, - network_size=None, vlan_start=None, vpn_start=None, - fixed_range_v6=None): + network_size=None, vlan_start=None, + vpn_start=None, fixed_range_v6=None, label='public'): """Creates fixed ips for host by range arguments: [fixed_range=FLAG], [num_networks=FLAG], [network_size=FLAG], [vlan_start=FLAG], @@ -495,9 +526,22 @@ class NetworkCommands(object): cidr=fixed_range, num_networks=int(num_networks), network_size=int(network_size), - cidr_v6=fixed_range_v6, vlan_start=int(vlan_start), - vpn_start=int(vpn_start)) + vpn_start=int(vpn_start), + cidr_v6=fixed_range_v6, + label=label) + + def list(self): + """List all created networks""" + print "%-18s\t%-15s\t%-15s\t%-15s" % (_('network'), + _('netmask'), + _('start address'), + 'DNS') + for network in db.network_get_all(context.get_admin_context()): + print "%-18s\t%-15s\t%-15s\t%-15s" % (network.cidr, + network.netmask, + network.dhcp_start, + network.dns) class ServiceCommands(object): @@ -579,6 +623,13 @@ class VolumeCommands(object): ctxt = context.get_admin_context() volume = db.volume_get(ctxt, param2id(volume_id)) host = volume['host'] + + if not host: + print "Volume not yet assigned to host." + print "Deleting volume from database and skipping rpc." + db.volume_destroy(ctxt, param2id(volume_id)) + return + if volume['status'] == 'in-use': print "Volume is in-use." print "Detach volume from instance and then try again." @@ -615,6 +666,7 @@ CATEGORIES = [ ('role', RoleCommands), ('shell', ShellCommands), ('vpn', VpnCommands), + ('fixed', FixedIpCommands), ('floating', FloatingIpCommands), ('network', NetworkCommands), ('service', ServiceCommands), diff --git a/locale/nova.pot b/locale/nova.pot index a96411e33922..53e38c619160 100644 --- a/locale/nova.pot +++ b/locale/nova.pot @@ -1826,7 +1826,7 @@ msgstr "" #: nova/virt/xenapi/vm_utils.py:290 #, python-format -msgid "PV Kernel in VDI:%d" +msgid "PV Kernel in VDI:%s" msgstr "" #: nova/virt/xenapi/vm_utils.py:318 diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index ddcdc673c794..1a06b3f010c9 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -21,7 +21,6 @@ Starting point for routing EC2 requests. 
""" import datetime -import routes import webob import webob.dec import webob.exc @@ -233,7 +232,7 @@ class Authorizer(wsgi.Middleware): super(Authorizer, self).__init__(application) self.action_roles = { 'CloudController': { - 'DescribeAvailabilityzones': ['all'], + 'DescribeAvailabilityZones': ['all'], 'DescribeRegions': ['all'], 'DescribeSnapshots': ['all'], 'DescribeKeyPairs': ['all'], diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index c80e1168a620..6919cd8d2ecf 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -327,7 +327,9 @@ class CloudController(object): if not group_name is None: groups = [g for g in groups if g.name in group_name] - return {'securityGroupInfo': groups} + return {'securityGroupInfo': + list(sorted(groups, + key=lambda k: (k['ownerId'], k['groupName'])))} def _format_security_group(self, context, group): g = {} @@ -839,11 +841,26 @@ class CloudController(object): self.compute_api.update(context, instance_id=instance_id, **kwargs) return True + def _format_image(self, context, image): + """Convert from format defined by BaseImageService to S3 format.""" + i = {} + i['imageId'] = image.get('id') + i['kernelId'] = image.get('kernel_id') + i['ramdiskId'] = image.get('ramdisk_id') + i['imageOwnerId'] = image.get('owner_id') + i['imageLocation'] = image.get('location') + i['imageState'] = image.get('status') + i['type'] = image.get('type') + i['isPublic'] = image.get('is_public') + i['architecture'] = image.get('architecture') + return i + def describe_images(self, context, image_id=None, **kwargs): - # Note: image_id is a list! + # NOTE: image_id is a list! images = self.image_service.index(context) if image_id: - images = filter(lambda x: x['imageId'] in image_id, images) + images = filter(lambda x: x['id'] in image_id, images) + images = [self._format_image(context, i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): @@ -866,6 +883,9 @@ class CloudController(object): % attribute) try: image = self.image_service.show(context, image_id) + image = self._format_image(context, + self.image_service.show(context, + image_id)) except IndexError: raise exception.ApiError(_('invalid id: %s') % image_id) result = {'image_id': image_id, 'launchPermission': []} diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 056c7dd27fff..d0b18eced4bb 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -34,6 +34,7 @@ from nova.api.openstack import flavors from nova.api.openstack import images from nova.api.openstack import servers from nova.api.openstack import shared_ip_groups +from nova.api.openstack import zones LOG = logging.getLogger('nova.api.openstack') @@ -79,6 +80,10 @@ class APIRouter(wsgi.Router): server_members["actions"] = "GET" server_members['suspend'] = 'POST' server_members['resume'] = 'POST' + server_members['reset_network'] = 'POST' + + mapper.resource("zone", "zones", controller=zones.Controller(), + collection={'detail': 'GET'}) mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 1dfdd5318c2b..473071738ea7 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -19,6 +19,7 @@ import datetime import hashlib import json import time +import logging import webob.exc import webob.dec diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 6d2fa16e8ec5..1dc3767e2d44 
100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -18,22 +18,29 @@ from nova import exception -def limited(items, req): - """Return a slice of items according to requested offset and limit. - - items - a sliceable - req - wobob.Request possibly containing offset and limit GET variables. - offset is where to start in the list, and limit is the maximum number - of items to return. - - If limit is not specified, 0, or > 1000, defaults to 1000. +def limited(items, request, max_limit=1000): """ + Return a slice of items according to requested offset and limit. - offset = int(req.GET.get('offset', 0)) - limit = int(req.GET.get('limit', 0)) - if not limit: - limit = 1000 - limit = min(1000, limit) + @param items: A sliceable entity + @param request: `webob.Request` possibly containing 'offset' and 'limit' + GET variables. 'offset' is where to start in the list, + and 'limit' is the maximum number of items to return. If + 'limit' is not specified, 0, or > max_limit, we default + to max_limit. + @kwarg max_limit: The maximum number of items to return from 'items' + """ + try: + offset = int(request.GET.get('offset', 0)) + except ValueError: + offset = 0 + + try: + limit = int(request.GET.get('limit', max_limit)) + except ValueError: + limit = max_limit + + limit = min(max_limit, limit or max_limit) range_end = offset + limit return items[offset:range_end] diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 323e6fda684d..35dd045d1509 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 OpenStack LLC. # All Rights Reserved. # @@ -65,6 +63,22 @@ def _translate_detail_keys(inst): inst_dict['status'] = power_mapping[inst_dict['status']] inst_dict['addresses'] = dict(public=[], private=[]) + + # grab single private fixed ip + try: + private_ip = inst['fixed_ip']['address'] + if private_ip: + inst_dict['addresses']['private'].append(private_ip) + except KeyError: + LOG.debug(_("Failed to read private ip")) + + # grab all public floating ips + try: + for floating in inst['fixed_ip']['floating_ips']: + inst_dict['addresses']['public'].append(floating['address']) + except KeyError: + LOG.debug(_("Failed to read public ip(s)")) + inst_dict['metadata'] = {} inst_dict['hostId'] = '' @@ -167,7 +181,8 @@ class Controller(wsgi.Controller): display_name=env['server']['name'], display_description=env['server']['name'], key_name=key_pair['name'], - key_data=key_pair['public_key']) + key_data=key_pair['public_key'], + onset_files=env.get('onset_files', [])) return _translate_keys(instances[0]) def update(self, req, id): @@ -253,6 +268,20 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def reset_network(self, req, id): + """ + Reset networking on an instance (admin only). + + """ + context = req.environ['nova.context'] + try: + self.compute_api.reset_network(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::reset_network %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + def pause(self, req, id): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py new file mode 100644 index 000000000000..830464ffd63a --- /dev/null +++ b/nova/api/openstack/zones.py @@ -0,0 +1,80 @@ +# Copyright 2010 OpenStack LLC. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import common +import logging + +from nova import flags +from nova import wsgi +from nova import db + + +FLAGS = flags.FLAGS + + +def _filter_keys(item, keys): + """ + Filters all model attributes except for keys + item is a dict + + """ + return dict((k, v) for k, v in item.iteritems() if k in keys) + + +def _scrub_zone(zone): + return _filter_keys(zone, ('id', 'api_url')) + + +class Controller(wsgi.Controller): + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "zone": ["id", "api_url"]}}} + + def index(self, req): + """Return all zones in brief""" + items = db.zone_get_all(req.environ['nova.context']) + items = common.limited(items, req) + items = [_scrub_zone(item) for item in items] + return dict(zones=items) + + def detail(self, req): + """Return all zones in detail""" + return self.index(req) + + def show(self, req, id): + """Return data about the given zone id""" + zone_id = int(id) + zone = db.zone_get(req.environ['nova.context'], zone_id) + return dict(zone=_scrub_zone(zone)) + + def delete(self, req, id): + zone_id = int(id) + db.zone_delete(req.environ['nova.context'], zone_id) + return {} + + def create(self, req): + context = req.environ['nova.context'] + env = self._deserialize(req.body, req) + zone = db.zone_create(context, env["zone"]) + return dict(zone=_scrub_zone(zone)) + + def update(self, req, id): + context = req.environ['nova.context'] + env = self._deserialize(req.body, req) + zone_id = int(id) + zone = db.zone_update(context, zone_id, env["zone"]) + return dict(zone=_scrub_zone(zone)) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index e652f1caaa60..5da7751a015a 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -74,6 +74,25 @@ LOG = logging.getLogger("nova.ldapdriver") # in which we may want to change the interface a bit more. 
+def _clean(attr): + """Clean attr for insertion into ldap""" + if attr is None: + return None + if type(attr) is unicode: + return str(attr) + return attr + + +def sanitize(fn): + """Decorator to sanitize all args""" + def _wrapped(self, *args, **kwargs): + args = [_clean(x) for x in args] + kwargs = dict((k, _clean(v)) for (k, v) in kwargs) + return fn(self, *args, **kwargs) + _wrapped.func_name = fn.func_name + return _wrapped + + class LdapDriver(object): """Ldap Auth driver @@ -106,23 +125,27 @@ class LdapDriver(object): self.conn.unbind_s() return False + @sanitize def get_user(self, uid): """Retrieve user by id""" attr = self.__get_ldap_user(uid) return self.__to_user(attr) + @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" query = '(accessKey=%s)' % access dn = FLAGS.ldap_user_subtree return self.__to_user(self.__find_object(dn, query)) + @sanitize def get_project(self, pid): """Retrieve project by id""" dn = self.__project_to_dn(pid) attr = self.__find_object(dn, LdapDriver.project_pattern) return self.__to_project(attr) + @sanitize def get_users(self): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, @@ -134,6 +157,7 @@ class LdapDriver(object): users.append(user) return users + @sanitize def get_projects(self, uid=None): """Retrieve list of projects""" pattern = LdapDriver.project_pattern @@ -143,6 +167,7 @@ class LdapDriver(object): pattern) return [self.__to_project(attr) for attr in attrs] + @sanitize def create_user(self, name, access_key, secret_key, is_admin): """Create a user""" if self.__user_exists(name): @@ -196,6 +221,7 @@ class LdapDriver(object): self.conn.add_s(self.__uid_to_dn(name), attr) return self.__to_user(dict(attr)) + @sanitize def create_project(self, name, manager_uid, description=None, member_uids=None): """Create a project""" @@ -231,6 +257,7 @@ class LdapDriver(object): self.conn.add_s(dn, attr) return self.__to_project(dict(attr)) + @sanitize def modify_project(self, project_id, manager_uid=None, description=None): """Modify an existing project""" if not manager_uid and not description: @@ -249,21 +276,25 @@ class LdapDriver(object): dn = self.__project_to_dn(project_id) self.conn.modify_s(dn, attr) + @sanitize def add_to_project(self, uid, project_id): """Add user to project""" dn = self.__project_to_dn(project_id) return self.__add_to_group(uid, dn) + @sanitize def remove_from_project(self, uid, project_id): """Remove user from project""" dn = self.__project_to_dn(project_id) return self.__remove_from_group(uid, dn) + @sanitize def is_in_project(self, uid, project_id): """Check if user is in project""" dn = self.__project_to_dn(project_id) return self.__is_in_group(uid, dn) + @sanitize def has_role(self, uid, role, project_id=None): """Check if user has role @@ -273,6 +304,7 @@ class LdapDriver(object): role_dn = self.__role_to_dn(role, project_id) return self.__is_in_group(uid, role_dn) + @sanitize def add_role(self, uid, role, project_id=None): """Add role for user (or user and project)""" role_dn = self.__role_to_dn(role, project_id) @@ -283,11 +315,13 @@ class LdapDriver(object): else: return self.__add_to_group(uid, role_dn) + @sanitize def remove_role(self, uid, role, project_id=None): """Remove role for user (or user and project)""" role_dn = self.__role_to_dn(role, project_id) return self.__remove_from_group(uid, role_dn) + @sanitize def get_user_roles(self, uid, project_id=None): """Retrieve list of roles for user (or user and project)""" if project_id is None: @@ 
-307,6 +341,7 @@ class LdapDriver(object): roles = self.__find_objects(project_dn, query) return [role['cn'][0] for role in roles] + @sanitize def delete_user(self, uid): """Delete a user""" if not self.__user_exists(uid): @@ -332,12 +367,14 @@ class LdapDriver(object): # Delete entry self.conn.delete_s(self.__uid_to_dn(uid)) + @sanitize def delete_project(self, project_id): """Delete a project""" project_dn = self.__project_to_dn(project_id) self.__delete_roles(project_dn) self.__delete_group(project_dn) + @sanitize def modify_user(self, uid, access_key=None, secret_key=None, admin=None): """Modify an existing user""" if not access_key and not secret_key and admin is None: diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index c53a4acdc287..cda2ecc28b68 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -10,7 +10,6 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" -export CLOUD_SERVERS_API_KEY="%(access)s" -export CLOUD_SERVERS_USERNAME="%(user)s" -export CLOUD_SERVERS_URL="%(os)s" - +export NOVA_API_KEY="%(access)s" +export NOVA_USERNAME="%(user)s" +export NOVA_URL="%(os)s" diff --git a/nova/compute/api.py b/nova/compute/api.py index ac02dbcfa882..0d2690c72825 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -67,10 +67,10 @@ class API(base.Base): """Get the network topic for an instance.""" try: instance = self.get(context, instance_id) - except exception.NotFound as e: + except exception.NotFound: LOG.warning(_("Instance %d was not found in get_network_topic"), instance_id) - raise e + raise host = instance['host'] if not host: @@ -85,10 +85,11 @@ class API(base.Base): min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None): + availability_zone=None, user_data=None, + onset_files=None): """Create the number of instances requested if quota and - other arguments check out ok.""" - + other arguments check out ok. 
+ """ type_data = instance_types.INSTANCE_TYPES[instance_type] num_instances = quota.allowed_instances(context, max_count, type_data) if num_instances < min_count: @@ -103,9 +104,9 @@ class API(base.Base): if not is_vpn: image = self.image_service.show(context, image_id) if kernel_id is None: - kernel_id = image.get('kernelId', None) + kernel_id = image.get('kernel_id', None) if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', None) + ramdisk_id = image.get('ramdisk_id', None) # No kernel and ramdisk for raw images if kernel_id == str(FLAGS.null_kernel): kernel_id = None @@ -156,7 +157,6 @@ class API(base.Base): 'key_data': key_data, 'locked': False, 'availability_zone': availability_zone} - elevated = context.elevated() instances = [] LOG.debug(_("Going to run %s instances..."), num_instances) @@ -193,7 +193,8 @@ class API(base.Base): {"method": "run_instance", "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id, - "availability_zone": availability_zone}}) + "availability_zone": availability_zone, + "onset_files": onset_files}}) for group_id in security_groups: self.trigger_security_group_members_refresh(elevated, group_id) @@ -293,10 +294,10 @@ class API(base.Base): LOG.debug(_("Going to try to terminate %s"), instance_id) try: instance = self.get(context, instance_id) - except exception.NotFound as e: + except exception.NotFound: LOG.warning(_("Instance %d was not found during terminate"), instance_id) - raise e + raise if (instance['state_description'] == 'terminating'): LOG.warning(_("Instance %d is already being terminated"), @@ -434,6 +435,10 @@ class API(base.Base): """Set the root/admin password for the given instance.""" self._cast_compute_message('set_admin_password', context, instance_id) + def inject_file(self, context, instance_id): + """Write a file to the given instance.""" + self._cast_compute_message('inject_file', context, instance_id) + def get_ajax_console(self, context, instance_id): """Get a url to an AJAX Console""" instance = self.get(context, instance_id) @@ -466,6 +471,13 @@ class API(base.Base): instance = self.get(context, instance_id) return instance['locked'] + def reset_network(self, context, instance_id): + """ + Reset networking on the instance. + + """ + self._cast_compute_message('reset_network', context, instance_id) + def attach_volume(self, context, instance_id, volume_id, device): if not re.match("^/dev/[a-z]d[a-z]+$", device): raise exception.ApiError(_("Invalid device specified: %s. " diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f4418af26027..b8d4b7ee9a24 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -34,6 +34,7 @@ terminating it. 
:func:`nova.utils.import_object` """ +import base64 import datetime import random import string @@ -127,10 +128,10 @@ class ComputeManager(manager.Manager): info = self.driver.get_info(instance_ref['name']) state = info['state'] except exception.NotFound: - state = power_state.NOSTATE + state = power_state.FAILED self.db.instance_set_state(context, instance_id, state) - def get_console_topic(self, context, **_kwargs): + def get_console_topic(self, context, **kwargs): """Retrieves the console host for a project on this host Currently this is just set in the flags for each compute host.""" @@ -139,7 +140,7 @@ class ComputeManager(manager.Manager): FLAGS.console_topic, FLAGS.console_host) - def get_network_topic(self, context, **_kwargs): + def get_network_topic(self, context, **kwargs): """Retrieves the network host for a project on this host""" # TODO(vish): This method should be memoized. This will make # the call to get_network_host cheaper, so that @@ -158,21 +159,22 @@ class ComputeManager(manager.Manager): @exception.wrap_exception def refresh_security_group_rules(self, context, - security_group_id, **_kwargs): + security_group_id, **kwargs): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_security_group_rules(security_group_id) @exception.wrap_exception def refresh_security_group_members(self, context, - security_group_id, **_kwargs): + security_group_id, **kwargs): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_security_group_members(security_group_id) @exception.wrap_exception - def run_instance(self, context, instance_id, **_kwargs): + def run_instance(self, context, instance_id, **kwargs): """Launch a new instance with specified options.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) + instance_ref.onset_files = kwargs.get('onset_files', []) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) LOG.audit(_("instance %s: starting..."), instance_id, @@ -323,28 +325,43 @@ class ComputeManager(manager.Manager): """Set the root/admin password for an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - if instance_ref['state'] != power_state.RUNNING: - logging.warn('trying to reset the password on a non-running ' - 'instance: %s (state: %s expected: %s)', - instance_ref['id'], - instance_ref['state'], - power_state.RUNNING) - - logging.debug('instance %s: setting admin password', + instance_id = instance_ref['id'] + instance_state = instance_ref['state'] + expected_state = power_state.RUNNING + if instance_state != expected_state: + LOG.warn(_('trying to reset the password on a non-running ' + 'instance: %(instance_id)s (state: %(instance_state)s ' + 'expected: %(expected_state)s)') % locals()) + LOG.audit(_('instance %s: setting admin password'), instance_ref['name']) if new_pass is None: # Generate a random password - new_pass = self._generate_password(FLAGS.password_length) - + new_pass = utils.generate_password(FLAGS.password_length) self.driver.set_admin_password(instance_ref, new_pass) self._update_state(context, instance_id) - def _generate_password(self, length=20): - """Generate a random sequence of letters and digits - to be used as a password. 
- """ - chrs = string.letters + string.digits - return "".join([random.choice(chrs) for i in xrange(length)]) + @exception.wrap_exception + @checks_instance_lock + def inject_file(self, context, instance_id, path, file_contents): + """Write a file to the specified path on an instance on this server""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + instance_id = instance_ref['id'] + instance_state = instance_ref['state'] + expected_state = power_state.RUNNING + if instance_state != expected_state: + LOG.warn(_('trying to inject a file into a non-running ' + 'instance: %(instance_id)s (state: %(instance_state)s ' + 'expected: %(expected_state)s)') % locals()) + # Files/paths *should* be base64-encoded at this point, but + # double-check to make sure. + b64_path = utils.ensure_b64_encoding(path) + b64_contents = utils.ensure_b64_encoding(file_contents) + plain_path = base64.b64decode(b64_path) + nm = instance_ref['name'] + msg = _('instance %(nm)s: injecting file to %(plain_path)s') % locals() + LOG.audit(msg) + self.driver.inject_file(instance_ref, b64_path, b64_contents) @exception.wrap_exception @checks_instance_lock @@ -498,6 +515,18 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) return instance_ref['locked'] + @checks_instance_lock + def reset_network(self, context, instance_id): + """ + Reset networking on the instance. + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + LOG.debug(_('instance %s: reset network'), instance_id, + context=context) + self.driver.reset_network(instance_ref) + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" @@ -511,7 +540,7 @@ class ComputeManager(manager.Manager): def get_ajax_console(self, context, instance_id): """Return connection information for an ajax console""" context = context.elevated() - logging.debug(_("instance %s: getting ajax console"), instance_id) + LOG.debug(_("instance %s: getting ajax console"), instance_id) instance_ref = self.db.instance_get(context, instance_id) return self.driver.get_ajax_console(instance_ref) diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py index 37039d2ec9f9..adfc2dff005a 100644 --- a/nova/compute/power_state.py +++ b/nova/compute/power_state.py @@ -27,6 +27,7 @@ SHUTDOWN = 0x04 SHUTOFF = 0x05 CRASHED = 0x06 SUSPENDED = 0x07 +FAILED = 0x08 def name(code): @@ -38,5 +39,6 @@ def name(code): SHUTDOWN: 'shutdown', SHUTOFF: 'shutdown', CRASHED: 'crashed', - SUSPENDED: 'suspended'} + SUSPENDED: 'suspended', + FAILED: 'failed to spawn'} return d[code] diff --git a/nova/context.py b/nova/context.py index f2669c9f1785..0256bf448dba 100644 --- a/nova/context.py +++ b/nova/context.py @@ -28,7 +28,6 @@ from nova import utils class RequestContext(object): - def __init__(self, user, project, is_admin=None, read_deleted=False, remote_address=None, timestamp=None, request_id=None): if hasattr(user, 'id'): @@ -53,7 +52,7 @@ class RequestContext(object): self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: - timestamp = datetime.datetime.utcnow() + timestamp = utils.utcnow() if isinstance(timestamp, str) or isinstance(timestamp, unicode): timestamp = utils.parse_isotime(timestamp) self.timestamp = timestamp @@ -101,7 +100,7 @@ class RequestContext(object): return cls(**values) def elevated(self, read_deleted=False): - """Return a version of this context 
with admin flag set""" + """Return a version of this context with admin flag set.""" return RequestContext(self.user_id, self.project_id, True, diff --git a/nova/db/api.py b/nova/db/api.py index 789cb8ebbac6..d7f3746d2c29 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -288,11 +288,21 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time): return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time) +def fixed_ip_get_all(context): + """Get all defined fixed ips.""" + return IMPL.fixed_ip_get_all(context) + + def fixed_ip_get_by_address(context, address): """Get a fixed ip by address or raise if it does not exist.""" return IMPL.fixed_ip_get_by_address(context, address) +def fixed_ip_get_all_by_instance(context, instance_id): + """Get fixed ips by instance or raise if none exist.""" + return IMPL.fixed_ip_get_all_by_instance(context, instance_id) + + def fixed_ip_get_instance(context, address): """Get an instance for a fixed ip by address.""" return IMPL.fixed_ip_get_instance(context, address) @@ -500,6 +510,11 @@ def network_get(context, network_id): return IMPL.network_get(context, network_id) +def network_get_all(context): + """Return all defined networks.""" + return IMPL.network_get_all(context) + + # pylint: disable-msg=C0103 def network_get_associated_fixed_ips(context, network_id): """Get all network's ips that have been associated.""" @@ -516,6 +531,11 @@ def network_get_by_instance(context, instance_id): return IMPL.network_get_by_instance(context, instance_id) +def network_get_all_by_instance(context, instance_id): + """Get all networks by instance id or raise if none exist.""" + return IMPL.network_get_all_by_instance(context, instance_id) + + def network_get_index(context, network_id): """Get non-conflicting index for network.""" return IMPL.network_get_index(context, network_id) @@ -556,7 +576,7 @@ def project_get_network(context, project_id, associate=True): """ - return IMPL.project_get_network(context, project_id) + return IMPL.project_get_network(context, project_id, associate) def project_get_network_v6(context, project_id): @@ -980,3 +1000,31 @@ def console_get_all_by_instance(context, instance_id): def console_get(context, console_id, instance_id=None): """Get a specific console (possibly on a given instance).""" return IMPL.console_get(context, console_id, instance_id) + + +#################### + + +def zone_create(context, values): + """Create a new child Zone entry.""" + return IMPL.zone_create(context, values) + + +def zone_update(context, zone_id, values): + """Update a child Zone entry.""" + return IMPL.zone_update(context, values) + + +def zone_delete(context, zone_id): + """Delete a child Zone.""" + return IMPL.zone_delete(context, zone_id) + + +def zone_get(context, zone_id): + """Get a specific child Zone.""" + return IMPL.zone_get(context, zone_id) + + +def zone_get_all(context): + """Get all child Zones.""" + return IMPL.zone_get_all(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 80c844635bb7..2697fac73a85 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -583,6 +583,17 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): return result.rowcount +@require_admin_context +def fixed_ip_get_all(context, session=None): + if not session: + session = get_session() + result = session.query(models.FixedIp).all() + if not result: + raise exception.NotFound(_('No fixed ips defined')) + + return result + + @require_context def fixed_ip_get_by_address(context, address, 
session=None): if not session: @@ -608,6 +619,17 @@ def fixed_ip_get_instance(context, address): return fixed_ip_ref.instance +@require_context +def fixed_ip_get_all_by_instance(context, instance_id): + session = get_session() + rv = session.query(models.FixedIp).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False) + if not rv: + raise exception.NotFound(_('No address for instance %s') % instance_id) + return rv + + @require_context def fixed_ip_get_instance_v6(context, address): session = get_session() @@ -719,6 +741,7 @@ def instance_get_all(context): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -729,6 +752,7 @@ def instance_get_all_by_user(context, user_id): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(user_id=user_id).\ all() @@ -740,6 +764,7 @@ def instance_get_all_by_host(context, host): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -753,6 +778,7 @@ def instance_get_all_by_project(context, project_id): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -766,6 +792,7 @@ def instance_get_all_by_reservation(context, reservation_id): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(reservation_id=reservation_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -773,6 +800,7 @@ def instance_get_all_by_reservation(context, reservation_id): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(project_id=context.project_id).\ filter_by(reservation_id=reservation_id).\ filter_by(deleted=False).\ @@ -1050,6 +1078,15 @@ def network_get(context, network_id, session=None): return result +@require_admin_context +def network_get_all(context): + session = get_session() + result = session.query(models.Network) + if not result: + raise exception.NotFound(_('No networks defined')) + return result + + # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable-msg=C0103 @@ -1093,6 +1130,19 @@ def network_get_by_instance(_context, instance_id): return rv +@require_admin_context +def network_get_all_by_instance(_context, instance_id): + session = get_session() + rv = session.query(models.Network).\ + filter_by(deleted=False).\ + join(models.Network.fixed_ips).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False) + if not rv: + raise exception.NotFound(_('No network for instance %s') % instance_id) + return rv + + @require_admin_context def network_set_host(context, network_id, host_id): session = get_session() @@ 
-2008,3 +2058,47 @@ def console_get(context, console_id, instance_id=None): raise exception.NotFound(_("No console with id %(console_id)s" " %(idesc)s") % locals()) return result + + +#################### + + +@require_admin_context +def zone_create(context, values): + zone = models.Zone() + zone.update(values) + zone.save() + return zone + + +@require_admin_context +def zone_update(context, zone_id, values): + zone = session.query(models.Zone).filter_by(id=zone_id).first() + if not zone: + raise exception.NotFound(_("No zone with id %(zone_id)s") % locals()) + zone.update(values) + zone.save() + return zone + + +@require_admin_context +def zone_delete(context, zone_id): + session = get_session() + with session.begin(): + session.execute('delete from zones ' + 'where id=:id', {'id': zone_id}) + + +@require_admin_context +def zone_get(context, zone_id): + session = get_session() + result = session.query(models.Zone).filter_by(id=zone_id).first() + if not result: + raise exception.NotFound(_("No zone with id %(zone_id)s") % locals()) + return result + + +@require_admin_context +def zone_get_all(context): + session = get_session() + return session.query(models.Zone).all() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py index 366944591e8d..9e7ab3554295 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -508,17 +508,19 @@ def upgrade(migrate_engine): # bind migrate_engine to your metadata meta.bind = migrate_engine - for table in (auth_tokens, export_devices, fixed_ips, floating_ips, - instances, key_pairs, networks, - projects, quotas, security_groups, security_group_inst_assoc, - security_group_rules, services, users, - user_project_association, user_project_role_association, - user_role_association, volumes): + tables = [auth_tokens, + instances, key_pairs, networks, fixed_ips, floating_ips, + quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, projects, + user_project_association, user_project_role_association, + user_role_association, volumes, export_devices] + for table in tables: try: table.create() except Exception: logging.info(repr(table)) logging.exception('Exception while creating table') + meta.drop_all(tables=tables) raise diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py index 699b837f8b20..413536a5929f 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -209,13 +209,16 @@ def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata meta.bind = migrate_engine - for table in (certificates, consoles, console_pools, instance_actions, - iscsi_targets): + + tables = [certificates, console_pools, consoles, instance_actions, + iscsi_targets] + for table in tables: try: table.create() except Exception: logging.info(repr(table)) logging.exception('Exception while creating table') + meta.drop_all(tables=tables) raise auth_tokens.c.user_id.alter(type=String(length=255, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py new file mode 100644 index 000000000000..5ba7910f1d2a --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +networks = Table('networks', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# + + +# +# Tables to alter +# + +networks_label = Column( + 'label', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + networks.create_column(networks_label) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py new file mode 100644 index 000000000000..ade981687745 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py @@ -0,0 +1,61 @@ +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# +# New Tables +# +zones = Table('zones', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('api_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +# +# Tables to alter +# + +# (none currently) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (zones, ): + try: + table.create() + except Exception: + logging.info(repr(table)) diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py index 2a13c54660ae..9bdaa6d6b61d 100644 --- a/nova/db/sqlalchemy/migration.py +++ b/nova/db/sqlalchemy/migration.py @@ -17,12 +17,22 @@ # under the License. import os +import sys from nova import flags import sqlalchemy from migrate.versioning import api as versioning_api -from migrate.versioning import exceptions as versioning_exceptions + +try: + from migrate.versioning import exceptions as versioning_exceptions +except ImportError: + try: + # python-migration changed location of exceptions after 1.6.3 + # See LP Bug #717467 + from migrate import exceptions as versioning_exceptions + except ImportError: + sys.exit(_("python-migrate is not installed. Exiting.")) FLAGS = flags.FLAGS @@ -45,8 +55,8 @@ def db_version(): engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False) meta.reflect(bind=engine) try: - for table in ('auth_tokens', 'export_devices', 'fixed_ips', - 'floating_ips', 'instances', + for table in ('auth_tokens', 'zones', 'export_devices', + 'fixed_ips', 'floating_ips', 'instances', 'key_pairs', 'networks', 'projects', 'quotas', 'security_group_instance_association', 'security_group_rules', 'security_groups', diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 7efb36c0e30b..40a96fc17663 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -373,6 +373,7 @@ class Network(BASE, NovaBase): "vpn_public_port"), {'mysql_engine': 'InnoDB'}) id = Column(Integer, primary_key=True) + label = Column(String(255)) injected = Column(Boolean, default=False) cidr = Column(String(255), unique=True) @@ -535,6 +536,15 @@ class Console(BASE, NovaBase): pool = relationship(ConsolePool, backref=backref('consoles')) +class Zone(BASE, NovaBase): + """Represents a child zone of this zone.""" + __tablename__ = 'zones' + id = Column(Integer, primary_key=True) + api_url = Column(String(255)) + username = Column(String(255)) + password = Column(String(255)) + + def register_models(): """Register Models and create metadata. 
@@ -547,7 +557,7 @@ def register_models(): Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate, ConsolePool, Console) # , Image, Host + Project, Certificate, ConsolePool, Console, Zone) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/flags.py b/nova/flags.py index 1d8eba94fd5e..f64a62da9ca4 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -282,12 +282,14 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), "Top-level directory for maintaining nova's state") +DEFINE_string('logdir', None, 'output to a per-service log file in named ' + 'directory') DEFINE_string('sql_connection', 'sqlite:///$state_path/nova.sqlite', 'connection string for sql database') -DEFINE_string('sql_idle_timeout', - '3600', +DEFINE_integer('sql_idle_timeout', + 3600, 'timeout for idle sql database connections') DEFINE_integer('sql_max_retries', 12, 'sql connection attempts') DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval') diff --git a/nova/image/s3.py b/nova/image/s3.py index 08a40f191721..14135a1ee432 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -36,6 +36,22 @@ from nova.image import service FLAGS = flags.FLAGS +def map_s3_to_base(image): + """Convert from S3 format to format defined by BaseImageService.""" + i = {} + i['id'] = image.get('imageId') + i['name'] = image.get('imageId') + i['kernel_id'] = image.get('kernelId') + i['ramdisk_id'] = image.get('ramdiskId') + i['location'] = image.get('imageLocation') + i['owner_id'] = image.get('imageOwnerId') + i['status'] = image.get('imageState') + i['type'] = image.get('type') + i['is_public'] = image.get('isPublic') + i['architecture'] = image.get('architecture') + return i + + class S3ImageService(service.BaseImageService): def modify(self, context, image_id, operation): @@ -65,26 +81,20 @@ class S3ImageService(service.BaseImageService): 'image_id': image_id})) return image_id - def _fix_image_id(self, images): - """S3 has imageId but OpenStack wants id""" - for image in images: - if 'imageId' in image: - image['id'] = image['imageId'] - return images - def index(self, context): """Return a list of all images that a user can see.""" response = self._conn(context).make_request( method='GET', bucket='_images') - return self._fix_image_id(json.loads(response.read())) + images = json.loads(response.read()) + return [map_s3_to_base(i) for i in images] def show(self, context, image_id): """return a image object if the context has permissions""" if FLAGS.connection_type == 'fake': return {'imageId': 'bar'} result = self.index(context) - result = [i for i in result if i['imageId'] == image_id] + result = [i for i in result if i['id'] == image_id] if not result: raise exception.NotFound(_('Image %s could not be found') % image_id) diff --git a/nova/log.py b/nova/log.py index b541488bdcdd..87a6dd51b773 100644 --- a/nova/log.py +++ b/nova/log.py @@ -28,9 +28,11 @@ It also allows setting of formatting information through flags. 
import cStringIO +import inspect import json import logging import logging.handlers +import os import sys import traceback @@ -92,7 +94,7 @@ critical = logging.critical log = logging.log # handlers StreamHandler = logging.StreamHandler -FileHandler = logging.FileHandler +RotatingFileHandler = logging.handlers.RotatingFileHandler # logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler. SysLogHandler = logging.handlers.SysLogHandler @@ -111,6 +113,18 @@ def _dictify_context(context): return context +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def get_log_file_path(binary=None): + if FLAGS.logfile: + return FLAGS.logfile + if FLAGS.logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(FLAGS.logdir, binary),) + + def basicConfig(): logging.basicConfig() for handler in logging.root.handlers: @@ -123,8 +137,9 @@ def basicConfig(): syslog = SysLogHandler(address='/dev/log') syslog.setFormatter(_formatter) logging.root.addHandler(syslog) - if FLAGS.logfile: - logfile = FileHandler(FLAGS.logfile) + logpath = get_log_file_path() + if logpath: + logfile = RotatingFileHandler(logpath) logfile.setFormatter(_formatter) logging.root.addHandler(logfile) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index ed37e8ba7e6f..535ce87bcb70 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -44,7 +44,7 @@ flags.DEFINE_string('dhcp_domain', flags.DEFINE_string('networks_path', '$state_path/networks', 'Location to keep network config files') -flags.DEFINE_string('public_interface', 'vlan1', +flags.DEFINE_string('public_interface', 'eth0', 'Interface for public IP addresses') flags.DEFINE_string('vlan_interface', 'eth0', 'network device for vlans') @@ -54,6 +54,8 @@ flags.DEFINE_string('routing_source_ip', '$my_ip', 'Public IP of network host') flags.DEFINE_bool('use_nova_chains', False, 'use the nova_ routing chains instead of default') +flags.DEFINE_string('input_chain', 'INPUT', + 'chain to add nova_input to') flags.DEFINE_string('dns_server', None, 'if set, uses specific dns server for dnsmasq') diff --git a/nova/network/manager.py b/nova/network/manager.py index 8eb9f041b17c..c6eba225e327 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -110,6 +110,7 @@ class NetworkManager(manager.Manager): This class must be subclassed to support specific topologies. """ + timeout_fixed_ips = True def __init__(self, network_driver=None, *args, **kwargs): if not network_driver: @@ -138,6 +139,19 @@ class NetworkManager(manager.Manager): self.driver.ensure_floating_forward(floating_ip['address'], fixed_address) + def periodic_tasks(self, context=None): + """Tasks to be run at a periodic interval.""" + super(NetworkManager, self).periodic_tasks(context) + if self.timeout_fixed_ips: + now = utils.utcnow() + timeout = FLAGS.fixed_ip_disassociate_timeout + time = now - datetime.timedelta(seconds=timeout) + num = self.db.fixed_ip_disassociate_all_by_timeout(context, + self.host, + time) + if num: + LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) + def set_network_host(self, context, network_id): """Safely sets the host of the network.""" LOG.debug(_("setting network host"), context=context) @@ -306,6 +320,7 @@ class FlatManager(NetworkManager): not do any setup in this mode, it must be done manually. Requests to 169.254.169.254 port 80 will need to be forwarded to the api server. 
""" + timeout_fixed_ips = False def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): """Gets a fixed ip from the pool.""" @@ -331,11 +346,12 @@ class FlatManager(NetworkManager): pass def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, *args, **kwargs): + cidr_v6, label, *args, **kwargs): """Create networks based on parameters.""" fixed_net = IPy.IP(cidr) fixed_net_v6 = IPy.IP(cidr_v6) significant_bits_v6 = 64 + count = 1 for index in range(num_networks): start = index * network_size significant_bits = 32 - int(math.log(network_size, 2)) @@ -348,6 +364,11 @@ class FlatManager(NetworkManager): net['gateway'] = str(project_net[1]) net['broadcast'] = str(project_net.broadcast()) net['dhcp_start'] = str(project_net[2]) + if num_networks > 1: + net['label'] = "%s_%d" % (label, count) + else: + net['label'] = label + count += 1 if(FLAGS.use_ipv6): cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6) @@ -451,18 +472,6 @@ class VlanManager(NetworkManager): instances in its subnet. """ - def periodic_tasks(self, context=None): - """Tasks to be run at a periodic interval.""" - super(VlanManager, self).periodic_tasks(context) - now = datetime.datetime.utcnow() - timeout = FLAGS.fixed_ip_disassociate_timeout - time = now - datetime.timedelta(seconds=timeout) - num = self.db.fixed_ip_disassociate_all_by_timeout(context, - self.host, - time) - if num: - LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) - def init_host(self): """Do any initialization that needs to be run if this is a standalone service. @@ -503,8 +512,14 @@ class VlanManager(NetworkManager): network_ref['bridge']) def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, vlan_start, vpn_start): + cidr_v6, vlan_start, vpn_start, **kwargs): """Create networks based on parameters.""" + # Check that num_networks + vlan_start is not > 4094, fixes lp708025 + if num_networks + vlan_start > 4094: + raise ValueError(_('The sum between the number of networks and' + ' the vlan start cannot be greater' + ' than 4094')) + fixed_net = IPy.IP(cidr) fixed_net_v6 = IPy.IP(cidr_v6) network_size_v6 = 1 << 64 diff --git a/nova/rpc.py b/nova/rpc.py index c19ee46356be..205bb524a12b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -29,6 +29,7 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +from eventlet import greenpool from eventlet import greenthread from nova import context @@ -42,11 +43,13 @@ from nova import utils FLAGS = flags.FLAGS LOG = logging.getLogger('nova.rpc') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') + class Connection(carrot_connection.BrokerConnection): """Connection instance object""" @classmethod - def instance(cls, new=False): + def instance(cls, new=True): """Returns the instance""" if new or not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, @@ -155,11 +158,15 @@ class AdapterConsumer(TopicConsumer): def __init__(self, connection=None, topic="broadcast", proxy=None): LOG.debug(_('Initing the Adapter Consumer for %s') % topic) self.proxy = proxy + self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) super(AdapterConsumer, self).__init__(connection=connection, topic=topic) + def receive(self, *args, **kwargs): + self.pool.spawn_n(self._receive, *args, **kwargs) + @exception.wrap_exception - def receive(self, message_data, message): + def _receive(self, message_data, message): """Magically looks for a method on the proxy object and calls it Message data 
should be a dictionary with two keys: @@ -246,7 +253,7 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = Connection.instance(True) + conn = Connection.instance() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) @@ -319,7 +326,7 @@ def call(context, topic, msg): self.result = data['result'] wait_msg = WaitMessage() - conn = Connection.instance(True) + conn = Connection.instance() consumer = DirectConsumer(connection=conn, msg_id=msg_id) consumer.register_callback(wait_msg) @@ -346,7 +353,7 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response""" LOG.debug(_("Making asynchronous cast...")) _pack_context(msg, context) - conn = Connection.instance(True) + conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index 14eaaa62c713..77b1dd37f40e 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -92,31 +92,3 @@ class RateLimitingMiddlewareTest(unittest.TestCase): self.assertEqual(middleware.limiter.__class__.__name__, "Limiter") middleware = RateLimitingMiddleware(simple_wsgi, service_host='foobar') self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy") - - -class LimiterTest(unittest.TestCase): - - def test_limiter(self): - items = range(2000) - req = Request.blank('/') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?offset=0') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?offset=3') - self.assertEqual(limited(items, req), items[3:1003]) - req = Request.blank('/?offset=2005') - self.assertEqual(limited(items, req), []) - req = Request.blank('/?limit=10') - self.assertEqual(limited(items, req), items[:10]) - req = Request.blank('/?limit=0') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?limit=3000') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?offset=1&limit=3') - self.assertEqual(limited(items, req), items[1:4]) - req = Request.blank('/?offset=3&limit=0') - self.assertEqual(limited(items, req), items[3:1003]) - req = Request.blank('/?offset=3&limit=1500') - self.assertEqual(limited(items, req), items[3:1003]) - req = Request.blank('/?offset=3000&limit=10') - self.assertEqual(limited(items, req), []) diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py new file mode 100644 index 000000000000..9d9837cc99a3 --- /dev/null +++ b/nova/tests/api/openstack/test_common.py @@ -0,0 +1,161 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Test suites for 'common' code used throughout the OpenStack HTTP API. +""" + +import unittest + +from webob import Request + +from nova.api.openstack.common import limited + + +class LimiterTest(unittest.TestCase): + """ + Unit tests for the `nova.api.openstack.common.limited` method which takes + in a list of items and, depending on the 'offset' and 'limit' GET params, + returns a subset or complete set of the given items. + """ + + def setUp(self): + """ + Run before each test. + """ + self.tiny = range(1) + self.small = range(10) + self.medium = range(1000) + self.large = range(10000) + + def test_limiter_offset_zero(self): + """ + Test offset key works with 0. + """ + req = Request.blank('/?offset=0') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_offset_medium(self): + """ + Test offset key works with a medium sized number. + """ + req = Request.blank('/?offset=10') + self.assertEqual(limited(self.tiny, req), []) + self.assertEqual(limited(self.small, req), self.small[10:]) + self.assertEqual(limited(self.medium, req), self.medium[10:]) + self.assertEqual(limited(self.large, req), self.large[10:1010]) + + def test_limiter_offset_over_max(self): + """ + Test offset key works with a number over 1000 (max_limit). + """ + req = Request.blank('/?offset=1001') + self.assertEqual(limited(self.tiny, req), []) + self.assertEqual(limited(self.small, req), []) + self.assertEqual(limited(self.medium, req), []) + self.assertEqual(limited(self.large, req), self.large[1001:2001]) + + def test_limiter_offset_blank(self): + """ + Test offset key works with a blank offset. + """ + req = Request.blank('/?offset=') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_offset_bad(self): + """ + Test offset key works with a BAD offset. + """ + req = Request.blank(u'/?offset=\u0020aa') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_nothing(self): + """ + Test request with no offset or limit + """ + req = Request.blank('/') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_zero(self): + """ + Test limit of zero. + """ + req = Request.blank('/?limit=0') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_medium(self): + """ + Test limit of 10. 
+ """ + req = Request.blank('/?limit=10') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium[:10]) + self.assertEqual(limited(self.large, req), self.large[:10]) + + def test_limiter_limit_over_max(self): + """ + Test limit of 3000. + """ + req = Request.blank('/?limit=3000') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_and_offset(self): + """ + Test request with both limit and offset. + """ + items = range(2000) + req = Request.blank('/?offset=1&limit=3') + self.assertEqual(limited(items, req), items[1:4]) + req = Request.blank('/?offset=3&limit=0') + self.assertEqual(limited(items, req), items[3:1003]) + req = Request.blank('/?offset=3&limit=1500') + self.assertEqual(limited(items, req), items[3:1003]) + req = Request.blank('/?offset=3000&limit=10') + self.assertEqual(limited(items, req), []) + + def test_limiter_custom_max_limit(self): + """ + Test a max_limit other than 1000. + """ + items = range(2000) + req = Request.blank('/?offset=1&limit=3') + self.assertEqual(limited(items, req, max_limit=2000), items[1:4]) + req = Request.blank('/?offset=3&limit=0') + self.assertEqual(limited(items, req, max_limit=2000), items[3:]) + req = Request.blank('/?offset=3&limit=2500') + self.assertEqual(limited(items, req, max_limit=2000), items[3:]) + req = Request.blank('/?offset=3000&limit=10') + self.assertEqual(limited(items, req, max_limit=2000), []) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 4ed13022bce5..565415e164de 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import datetime import json import unittest @@ -39,6 +40,13 @@ def return_server(context, id): return stub_instance(id) +def return_server_with_addresses(private, public): + def _return_server(context, id): + return stub_instance(id, private_address=private, + public_addresses=public) + return _return_server + + def return_servers(context, user_id=1): return [stub_instance(i, user_id) for i in xrange(5)] @@ -55,9 +63,45 @@ def instance_address(context, instance_id): return None -def stub_instance(id, user_id=1): - return Instance(id=id, state=0, image_id=10, user_id=user_id, - display_name='server%s' % id) +def stub_instance(id, user_id=1, private_address=None, public_addresses=None): + if public_addresses == None: + public_addresses = list() + + instance = { + "id": id, + "admin_pass": "", + "user_id": user_id, + "project_id": "", + "image_id": 10, + "kernel_id": "", + "ramdisk_id": "", + "launch_index": 0, + "key_name": "", + "key_data": "", + "state": 0, + "state_description": "", + "memory_mb": 0, + "vcpus": 0, + "local_gb": 0, + "hostname": "", + "host": "", + "instance_type": "", + "user_data": "", + "reservation_id": "", + "mac_address": "", + "scheduled_at": datetime.datetime.now(), + "launched_at": datetime.datetime.now(), + "terminated_at": datetime.datetime.now(), + "availability_zone": "", + "display_name": "server%s" % id, + "display_description": "", + "locked": False} + + instance["fixed_ip"] = { + "address": private_address, + "floating_ips": [{"address":ip} for ip in public_addresses]} + + return instance def fake_compute_api(cls, req, id): @@ -105,6 +149,22 @@ class ServersTest(unittest.TestCase): self.assertEqual(res_dict['server']['id'], '1') self.assertEqual(res_dict['server']['name'], 'server1') + def test_get_server_by_id_with_addresses(self): + private = "192.168.0.3" + public = ["1.2.3.4"] + new_return_server = return_server_with_addresses(private, public) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = webob.Request.blank('/v1.0/servers/1') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], '1') + self.assertEqual(res_dict['server']['name'], 'server1') + addresses = res_dict['server']['addresses'] + self.assertEqual(len(addresses["public"]), len(public)) + self.assertEqual(addresses["public"][0], public[0]) + self.assertEqual(len(addresses["private"]), 1) + self.assertEqual(addresses["private"][0], private) + def test_get_server_list(self): req = webob.Request.blank('/v1.0/servers') res = req.get_response(fakes.wsgi_app()) @@ -315,6 +375,18 @@ class ServersTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) + def test_server_reset_network(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/reset_network') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + def test_server_diagnostics(self): req = webob.Request.blank("/v1.0/servers/1/diagnostics") req.method = "GET" diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py new file mode 100644 index 000000000000..5542a1cf3e2d --- /dev/null +++ b/nova/tests/api/openstack/test_zones.py @@ -0,0 +1,141 @@ +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import unittest + +import stubout +import webob +import json + +import nova.db +from nova import context +from nova import flags +from nova.api.openstack import zones +from nova.tests.api.openstack import fakes + + +FLAGS = flags.FLAGS +FLAGS.verbose = True + + +def zone_get(context, zone_id): + return dict(id=1, api_url='http://foo.com', username='bob', + password='xxx') + + +def zone_create(context, values): + zone = dict(id=1) + zone.update(values) + return zone + + +def zone_update(context, zone_id, values): + zone = dict(id=zone_id, api_url='http://foo.com', username='bob', + password='xxx') + zone.update(values) + return zone + + +def zone_delete(context, zone_id): + pass + + +def zone_get_all(context): + return [ + dict(id=1, api_url='http://foo.com', username='bob', + password='xxx'), + dict(id=2, api_url='http://blah.com', username='alice', + password='qwerty') + ] + + +class ZonesTest(unittest.TestCase): + def setUp(self): + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + + self.allow_admin = FLAGS.allow_admin_api + FLAGS.allow_admin_api = True + + self.stubs.Set(nova.db, 'zone_get', zone_get) + self.stubs.Set(nova.db, 'zone_get_all', zone_get_all) + self.stubs.Set(nova.db, 'zone_update', zone_update) + self.stubs.Set(nova.db, 'zone_create', zone_create) + self.stubs.Set(nova.db, 'zone_delete', zone_delete) + + def tearDown(self): + self.stubs.UnsetAll() + FLAGS.allow_admin_api = self.allow_admin + + def test_get_zone_list(self): + req = webob.Request.blank('/v1.0/zones') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['zones']), 2) + + def test_get_zone_by_id(self): + req = webob.Request.blank('/v1.0/zones/1') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res_dict['zone']['id'], 1) + self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') + self.assertFalse('password' in res_dict['zone']) + self.assertEqual(res.status_int, 200) + + def test_zone_delete(self): + req = webob.Request.blank('/v1.0/zones/1') + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + + def test_zone_create(self): + body = dict(zone=dict(api_url='http://blah.zoo', username='fred', + password='fubar')) + req = webob.Request.blank('/v1.0/zones') + req.method = 'POST' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['zone']['id'], 1) + self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo') + self.assertFalse('username' in res_dict['zone']) + + def test_zone_update(self): + body = dict(zone=dict(username='zeb', password='sneaky')) + req = 
webob.Request.blank('/v1.0/zones/1') + req.method = 'PUT' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['zone']['id'], 1) + self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') + self.assertFalse('username' in res_dict['zone']) + + +if __name__ == '__main__': + unittest.main() diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 2569e262b760..fa27825cdfa9 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -248,16 +248,14 @@ class ApiEc2TestCase(test.TestCase): self.mox.ReplayAll() rv = self.ec2.get_all_security_groups() - # I don't bother checkng that we actually find it here, - # because the create/delete unit test further up should - # be good enough for that. - for group in rv: - if group.name == security_group_name: - self.assertEquals(len(group.rules), 1) - self.assertEquals(int(group.rules[0].from_port), 80) - self.assertEquals(int(group.rules[0].to_port), 81) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0') + + group = [grp for grp in rv if grp.name == security_group_name][0] + + self.assertEquals(len(group.rules), 1) + self.assertEquals(int(group.rules[0].from_port), 80) + self.assertEquals(int(group.rules[0].to_port), 81) + self.assertEquals(len(group.rules[0].grants), 1) + self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0') self.expect_http() self.mox.ReplayAll() @@ -314,16 +312,13 @@ class ApiEc2TestCase(test.TestCase): self.mox.ReplayAll() rv = self.ec2.get_all_security_groups() - # I don't bother checkng that we actually find it here, - # because the create/delete unit test further up should - # be good enough for that. 
- for group in rv: - if group.name == security_group_name: - self.assertEquals(len(group.rules), 1) - self.assertEquals(int(group.rules[0].from_port), 80) - self.assertEquals(int(group.rules[0].to_port), 81) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '::/0') + + group = [grp for grp in rv if grp.name == security_group_name][0] + self.assertEquals(len(group.rules), 1) + self.assertEquals(int(group.rules[0].from_port), 80) + self.assertEquals(int(group.rules[0].to_port), 81) + self.assertEquals(len(group.rules[0].grants), 1) + self.assertEquals(str(group.rules[0].grants[0]), '::/0') self.expect_http() self.mox.ReplayAll() diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 2aa0690e770a..b049ac9439a1 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -202,6 +202,14 @@ class ComputeTestCase(test.TestCase): self.compute.set_admin_password(self.context, instance_id) self.compute.terminate_instance(self.context, instance_id) + def test_inject_file(self): + """Ensure we can write a file to an instance""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.inject_file(self.context, instance_id, "/tmp/test", + "File Contents") + self.compute.terminate_instance(self.context, instance_id) + def test_snapshot(self): """Ensure instance can be snapshotted""" instance_id = self._create_instance() diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py index 868a5ead392e..c2c9d777278d 100644 --- a/nova/tests/test_log.py +++ b/nova/tests/test_log.py @@ -46,6 +46,27 @@ class RootLoggerTestCase(test.TestCase): self.assert_(True) # didn't raise exception +class LogHandlerTestCase(test.TestCase): + def test_log_path_logdir(self): + self.flags(logdir='/some/path') + self.assertEquals(log.get_log_file_path(binary='foo-bar'), + '/some/path/foo-bar.log') + + def test_log_path_logfile(self): + self.flags(logfile='/some/path/foo-bar.log') + self.assertEquals(log.get_log_file_path(binary='foo-bar'), + '/some/path/foo-bar.log') + + def test_log_path_none(self): + self.assertTrue(log.get_log_file_path(binary='foo-bar') is None) + + def test_log_path_logfile_overrides_logdir(self): + self.flags(logdir='/some/other/path', + logfile='/some/path/foo-bar.log') + self.assertEquals(log.get_log_file_path(binary='foo-bar'), + '/some/path/foo-bar.log') + + class NovaFormatterTestCase(test.TestCase): def setUp(self): super(NovaFormatterTestCase, self).setUp() diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 9f5b266f33ec..6b8efc9d8770 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -32,6 +32,7 @@ from nova.virt import xenapi_conn from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import volume_utils from nova.virt.xenapi.vmops import SimpleDH +from nova.virt.xenapi.vmops import VMOps from nova.tests.db import fakes as db_fakes from nova.tests.xenapi import stubs from nova.tests.glance import stubs as glance_stubs @@ -141,6 +142,10 @@ class XenAPIVolumeTestCase(test.TestCase): self.stubs.UnsetAll() +def reset_network(*args): + pass + + class XenAPIVMTestCase(test.TestCase): """ Unit tests for VM operations @@ -162,6 +167,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) stubs.stubout_get_this_vm_uuid(self.stubs) stubs.stubout_stream_disk(self.stubs) + self.stubs.Set(VMOps, 'reset_network', reset_network) 
glance_stubs.stubout_glance_client(self.stubs, glance_stubs.FakeGlance) self.conn = xenapi_conn.get_connection(False) @@ -243,7 +249,8 @@ class XenAPIVMTestCase(test.TestCase): # Check that the VM is running according to XenAPI. self.assertEquals(vm['power_state'], 'Running') - def _test_spawn(self, image_id, kernel_id, ramdisk_id): + def _test_spawn(self, image_id, kernel_id, ramdisk_id, + instance_type="m1.large"): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) values = {'name': 1, 'id': 1, @@ -252,7 +259,7 @@ class XenAPIVMTestCase(test.TestCase): 'image_id': image_id, 'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id, - 'instance_type': 'm1.large', + 'instance_type': instance_type, 'mac_address': 'aa:bb:cc:dd:ee:ff', } conn = xenapi_conn.get_connection(False) @@ -260,6 +267,12 @@ class XenAPIVMTestCase(test.TestCase): conn.spawn(instance) self.check_vm_record(conn) + def test_spawn_not_enough_memory(self): + FLAGS.xenapi_image_service = 'glance' + self.assertRaises(Exception, + self._test_spawn, + 1, 2, 3, "m1.xlarge") + def test_spawn_raw_objectstore(self): FLAGS.xenapi_image_service = 'objectstore' self._test_spawn(1, None, None) diff --git a/nova/twistd.py b/nova/twistd.py index 6390a814452f..60ff7879a0fa 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -43,8 +43,6 @@ else: FLAGS = flags.FLAGS -flags.DEFINE_string('logdir', None, 'directory to keep log files in ' - '(will be prepended to $logfile)') class TwistdServerOptions(ServerOptions): diff --git a/nova/utils.py b/nova/utils.py index 8d7ff1f641e2..42efa0008d3c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -20,13 +20,14 @@ System-level utilities and helper functions. """ +import base64 import datetime import inspect import json import os import random -import subprocess import socket +import string import struct import sys import time @@ -36,6 +37,7 @@ import netaddr from eventlet import event from eventlet import greenthread +from eventlet.green import subprocess from nova import exception from nova.exception import ProcessExecutionError @@ -235,6 +237,15 @@ def generate_mac(): return ':'.join(map(lambda x: "%02x" % x, mac)) +def generate_password(length=20): + """Generate a random sequence of letters and digits + to be used as a password. Note that this is not intended + to represent the ultimate in security. + """ + chrs = string.letters + string.digits + return "".join([random.choice(chrs) for i in xrange(length)]) + + def last_octet(address): return int(address.split(".")[-1]) @@ -476,3 +487,15 @@ def dumps(value): def loads(s): return json.loads(s) + + +def ensure_b64_encoding(val): + """Safety method to ensure that values expected to be base64-encoded + actually are. If they are, the value is returned unchanged. Otherwise, + the encoded value is returned. + """ + try: + dummy = base64.decode(val) + return val + except TypeError: + return base64.b64encode(val) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 161445b86e1f..92749f38aa4f 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -152,6 +152,21 @@ class FakeConnection(object): """ pass + def inject_file(self, instance, b64_path, b64_contents): + """ + Writes a file on the specified instance. + + The first parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. The second + parameter is the base64-encoded path to which the file is to be + written on the instance; the third is the contents of the file, also + base64-encoded. + + The work will be done asynchronously. 
This function returns a + task that allows the caller to detect when it is complete. + """ + pass + def rescue(self, instance): """ Rescue the specified instance. diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index e8352771c81f..018d0dcd3086 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -286,6 +286,10 @@ class SessionBase(object): rec['currently_attached'] = False rec['device'] = '' + def host_compute_free_memory(self, _1, ref): + #Always return 12GB available + return 12 * 1024 * 1024 * 1024 + def xenapi_request(self, methodname, params): if methodname.startswith('login'): self._login(methodname, params) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4bbd522c1735..80cc3035d7fe 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -138,6 +138,16 @@ class VMHelper(HelperBase): LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals()) return vm_ref + @classmethod + def ensure_free_mem(cls, session, instance): + instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] + mem = long(instance_type['memory_mb']) * 1024 * 1024 + #get free memory from host + host = session.get_xenapi_host() + host_free_mem = long(session.get_xenapi().host. + compute_free_memory(host)) + return host_free_mem >= mem + @classmethod def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new @@ -384,7 +394,7 @@ class VMHelper(HelperBase): pv = True elif pv_str.lower() == 'false': pv = False - LOG.debug(_("PV Kernel in VDI:%d"), pv) + LOG.debug(_("PV Kernel in VDI:%s"), pv) return pv @classmethod @@ -439,6 +449,14 @@ class VMHelper(HelperBase): else: return None + @classmethod + def lookup_kernel_ramdisk(cls, session, vm): + vm_rec = session.get_xenapi().VM.get_record(vm) + if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec: + return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk']) + else: + return (None, None) + @classmethod def compile_info(cls, record): """Fill record with VM status information""" diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index e84ce20c4964..0168681f6b93 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -67,13 +67,19 @@ class VMOps(object): raise exception.Duplicate(_('Attempted to create' ' non-unique name %s') % instance.name) - bridge = db.network_get_by_instance(context.get_admin_context(), - instance['id'])['bridge'] - network_ref = \ - NetworkHelper.find_network_with_bridge(self._session, bridge) + #ensure enough free memory is available + if not VMHelper.ensure_free_mem(self._session, instance): + name = instance['name'] + LOG.exception(_('instance %(name)s: not enough free memory') + % locals()) + db.instance_set_state(context.get_admin_context(), + instance['id'], + power_state.SHUTDOWN) + return user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) + #if kernel is not present we must download a raw disk if instance.kernel_id: disk_image_type = ImageType.DISK @@ -99,16 +105,70 @@ class VMOps(object): instance, kernel, ramdisk, pv_kernel) VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) - if network_ref: - VMHelper.create_vif(self._session, vm_ref, - network_ref, instance.mac_address) + # write network info + admin_context = context.get_admin_context() + + # TODO(tr3buchet) - remove comment in multi-nic + # I've decided to go ahead and consider multiple IPs and networks + # at this stage 
even though they aren't implemented because these will + # be needed for multi-nic and there was no sense writing it for single + # network/single IP and then having to turn around and re-write it + IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id']) + for network in db.network_get_all_by_instance(admin_context, + instance['id']): + network_IPs = [ip for ip in IPs if ip.network_id == network.id] + + def ip_dict(ip): + return {'netmask': network['netmask'], + 'enabled': '1', + 'ip': ip.address} + + mac_id = instance.mac_address.replace(':', '') + location = 'vm-data/networking/%s' % mac_id + mapping = {'label': network['label'], + 'gateway': network['gateway'], + 'mac': instance.mac_address, + 'dns': [network['dns']], + 'ips': [ip_dict(ip) for ip in network_IPs]} + self.write_to_param_xenstore(vm_ref, {location: mapping}) + + # TODO(tr3buchet) - remove comment in multi-nic + # this bit here about creating the vifs will be updated + # in multi-nic to handle multiple IPs on the same network + # and multiple networks + # for now it works as there is only one of each + bridge = network['bridge'] + network_ref = \ + NetworkHelper.find_network_with_bridge(self._session, bridge) + + if network_ref: + VMHelper.create_vif(self._session, vm_ref, + network_ref, instance.mac_address) + LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) instance_name = instance.name LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.') % locals()) + def _inject_onset_files(): + onset_files = instance.onset_files + if onset_files: + # Check if this is a JSON-encoded string and convert if needed. + if isinstance(onset_files, basestring): + try: + onset_files = json.loads(onset_files) + except ValueError: + LOG.exception(_("Invalid value for onset_files: '%s'") + % onset_files) + onset_files = [] + # Inject any files, if specified + for path, contents in instance.onset_files: + LOG.debug(_("Injecting file path: '%s'") % path) + self.inject_file(instance, path, contents) # NOTE(armando): Do we really need to do this in virt? + # NOTE(tr3buchet): not sure but wherever we do it, we need to call + # reset_network afterwards timer = utils.LoopingCall(f=None) def _wait_for_boot(): @@ -119,6 +179,8 @@ class VMOps(object): if state == power_state.RUNNING: LOG.debug(_('Instance %s: booted'), instance['name']) timer.stop() + _inject_onset_files() + return True except Exception, exc: LOG.warn(exc) LOG.exception(_('instance %s: failed to boot'), @@ -127,8 +189,13 @@ class VMOps(object): instance['id'], power_state.SHUTDOWN) timer.stop() + return False timer.f = _wait_for_boot + + # call reset networking + self.reset_network(instance) + return timer.start(interval=0.5, now=True) def _get_vm_opaque_ref(self, instance_or_vm): @@ -161,7 +228,8 @@ class VMOps(object): instance_name = instance_or_vm.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: - raise Exception(_('Instance not present %s') % instance_name) + raise exception.NotFound( + _('Instance not present %s') % instance_name) return vm def snapshot(self, instance, image_id): @@ -255,6 +323,32 @@ class VMOps(object): raise RuntimeError(resp_dict['message']) return resp_dict['message'] + def inject_file(self, instance, b64_path, b64_contents): + """Write a file to the VM instance. The path to which it is to be + written and the contents of the file need to be supplied; both should + be base64-encoded to prevent errors with non-ASCII characters being + transmitted. 
If the agent does not support file injection, or the user + has disabled it, a NotImplementedError will be raised. + """ + # Files/paths *should* be base64-encoded at this point, but + # double-check to make sure. + b64_path = utils.ensure_b64_encoding(b64_path) + b64_contents = utils.ensure_b64_encoding(b64_contents) + + # Need to uniquely identify this request. + transaction_id = str(uuid.uuid4()) + args = {'id': transaction_id, 'b64_path': b64_path, + 'b64_contents': b64_contents} + # If the agent doesn't support file injection, a NotImplementedError + # will be raised with the appropriate message. + resp = self._make_agent_call('inject_file', instance, '', args) + resp_dict = json.loads(resp) + if resp_dict['returncode'] != '0': + # There was some other sort of error; the message will contain + # a description of the error. + raise RuntimeError(resp_dict['message']) + return resp_dict['message'] + def _shutdown(self, instance, vm): """Shutdown an instance """ state = self.get_info(instance['name'])['state'] @@ -286,8 +380,23 @@ class VMOps(object): def _destroy_vm(self, instance, vm): """Destroys a VM record """ try: - task = self._session.call_xenapi('Async.VM.destroy', vm) - self._session.wait_for_task(instance.id, task) + kernel = None + ramdisk = None + if instance.kernel_id or instance.ramdisk_id: + (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk( + self._session, vm) + task1 = self._session.call_xenapi('Async.VM.destroy', vm) + LOG.debug(_("Removing kernel/ramdisk files")) + fn = "remove_kernel_ramdisk" + args = {} + if kernel: + args['kernel-file'] = kernel + if ramdisk: + args['ramdisk-file'] = ramdisk + task2 = self._session.async_call_plugin('glance', fn, args) + self._session.wait_for_task(instance.id, task1) + self._session.wait_for_task(instance.id, task2) + LOG.debug(_("kernel/ramdisk files removed")) except self.XenAPI.Failure, exc: LOG.exception(exc) @@ -374,6 +483,14 @@ class VMOps(object): # TODO: implement this! return 'http://fakeajaxconsole/fake_url' + def reset_network(self, instance): + """ + Creates uuid arg to pass to make_agent_call and calls it. + + """ + args = {'id': str(uuid.uuid4())} + resp = self._make_agent_call('resetnetwork', instance, '', args) + def list_from_xenstore(self, vm, path): """Runs the xenstore-ls command to get a listing of all records from 'path' downward. Returns a dict with the sub-paths as keys, @@ -443,6 +560,11 @@ class VMOps(object): if 'TIMEOUT:' in err_msg: LOG.error(_('TIMEOUT: The call to %(method)s timed out. ' 'VM id=%(instance_id)s; args=%(strargs)s') % locals()) + elif 'NOT IMPLEMENTED:' in err_msg: + LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not' + ' supported by the agent. VM id=%(instance_id)s;' + ' args=%(strargs)s') % locals()) + raise NotImplementedError(err_msg) else: LOG.error(_('The call to %(method)s returned an error: %(e)s. ' 'VM id=%(instance_id)s; args=%(strargs)s') % locals()) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index a0b0499b8715..c2f65699fd77 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -168,6 +168,12 @@ class XenAPIConnection(object): """Set the root/admin password on the VM instance""" self._vmops.set_admin_password(instance, new_pass) + def inject_file(self, instance, b64_path, b64_contents): + """Create a file on the VM instance. The file path and contents + should be base64-encoded. 
+ """ + self._vmops.inject_file(instance, b64_path, b64_contents) + def destroy(self, instance): """Destroy VM instance""" self._vmops.destroy(instance) @@ -188,6 +194,10 @@ class XenAPIConnection(object): """resume the specified instance""" self._vmops.resume(instance, callback) + def reset_network(self, instance): + """reset networking for specified instance""" + self._vmops.reset_network(instance) + def get_info(self, instance_id): """Return data about VM instance""" return self._vmops.get_info(instance_id) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 6e70ec881aef..d2f02e4e075b 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -111,10 +111,10 @@ class VolumeManager(manager.Manager): LOG.debug(_("volume %s: creating export"), volume_ref['name']) self.driver.create_export(context, volume_ref) - except Exception as e: + except Exception: self.db.volume_update(context, volume_ref['id'], {'status': 'error'}) - raise e + raise now = datetime.datetime.utcnow() self.db.volume_update(context, @@ -137,11 +137,11 @@ class VolumeManager(manager.Manager): self.driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) - except Exception as e: + except Exception: self.db.volume_update(context, volume_ref['id'], {'status': 'error_deleting'}) - raise e + raise self.db.volume_destroy(context, volume_id) LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent index 12c3a19c8d0c..94eaabe73362 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent @@ -73,8 +73,8 @@ def key_init(self, arg_dict): @jsonify def password(self, arg_dict): """Writes a request to xenstore that tells the agent to set - the root password for the given VM. The password should be - encrypted using the shared secret key that was returned by a + the root password for the given VM. The password should be + encrypted using the shared secret key that was returned by a previous call to key_init. The encrypted password value should be passed as the value for the 'enc_pass' key in arg_dict. """ @@ -91,6 +91,17 @@ def password(self, arg_dict): return resp +@jsonify +def resetnetwork(self, arg_dict): + """Writes a resquest to xenstore that tells the agent + to reset networking. + """ + arg_dict['value'] = json.dumps({'name': 'resetnetwork', 'value': ''}) + request_id = arg_dict['id'] + arg_dict['path'] = "data/host/%s" % request_id + xenstore.write_record(self, arg_dict) + + def _wait_for_agent(self, request_id, arg_dict): """Periodically checks xenstore for a response from the agent. The request is always written to 'data/host/{id}', and @@ -108,7 +119,8 @@ def _wait_for_agent(self, request_id, arg_dict): # First, delete the request record arg_dict["path"] = "data/host/%s" % request_id xenstore.delete_record(self, arg_dict) - raise TimeoutError("TIMEOUT: No response from agent within %s seconds." % + raise TimeoutError( + "TIMEOUT: No response from agent within %s seconds." 
% AGENT_TIMEOUT) ret = xenstore.read_record(self, arg_dict) # Note: the response for None with be a string that includes @@ -123,4 +135,5 @@ def _wait_for_agent(self, request_id, arg_dict): if __name__ == "__main__": XenAPIPlugin.dispatch( {"key_init": key_init, - "password": password}) + "password": password, + "resetnetwork": resetnetwork}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index aadacce57456..8cb439259f7f 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -43,32 +43,47 @@ CHUNK_SIZE = 8192 KERNEL_DIR = '/boot/guest' FILE_SR_PATH = '/var/run/sr-mount' -def copy_kernel_vdi(session,args): + +def remove_kernel_ramdisk(session, args): + """Removes kernel and/or ramdisk from dom0's file system""" + kernel_file = exists(args, 'kernel-file') + ramdisk_file = exists(args, 'ramdisk-file') + if kernel_file: + os.remove(kernel_file) + if ramdisk_file: + os.remove(ramdisk_file) + return "ok" + + +def copy_kernel_vdi(session, args): vdi = exists(args, 'vdi-ref') - size = exists(args,'image-size') + size = exists(args, 'image-size') #Use the uuid as a filename - vdi_uuid=session.xenapi.VDI.get_uuid(vdi) - copy_args={'vdi_uuid':vdi_uuid,'vdi_size':int(size)} - filename=with_vdi_in_dom0(session, vdi, False, + vdi_uuid = session.xenapi.VDI.get_uuid(vdi) + copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)} + filename = with_vdi_in_dom0(session, vdi, False, lambda dev: - _copy_kernel_vdi('/dev/%s' % dev,copy_args)) + _copy_kernel_vdi('/dev/%s' % dev, copy_args)) return filename -def _copy_kernel_vdi(dest,copy_args): - vdi_uuid=copy_args['vdi_uuid'] - vdi_size=copy_args['vdi_size'] - logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",dest,vdi_uuid) - filename=KERNEL_DIR + '/' + vdi_uuid + +def _copy_kernel_vdi(dest, copy_args): + vdi_uuid = copy_args['vdi_uuid'] + vdi_size = copy_args['vdi_size'] + logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s", + dest, vdi_uuid) + filename = KERNEL_DIR + '/' + vdi_uuid #read data from /dev/ and write into a file on /boot/guest - of=open(filename,'wb') - f=open(dest,'rb') + of = open(filename, 'wb') + f = open(dest, 'rb') #copy only vdi_size bytes - data=f.read(vdi_size) + data = f.read(vdi_size) of.write(data) f.close() - of.close() - logging.debug("Done. Filename: %s",filename) - return filename + of.close() + logging.debug("Done. Filename: %s", filename) + return filename + def put_vdis(session, args): params = pickle.loads(exists(args, 'params')) @@ -76,22 +91,23 @@ def put_vdis(session, args): image_id = params["image_id"] glance_host = params["glance_host"] glance_port = params["glance_port"] - + sr_path = get_sr_path(session) #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs - tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id)) + tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id)) tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path] - paths = [ "%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids ] + paths = ["%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids] tar_cmd.extend(paths) logging.debug("Bundling image with cmd: %s", tar_cmd) subprocess.call(tar_cmd) - logging.debug("Writing to test file %s", tmp_file) + logging.debug("Writing to test file %s", tmp_file) put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port) - return "" # FIXME(sirp): return anything useful here? 
+ # FIXME(sirp): return anything useful here? + return "" def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port): - size = os.path.getsize(tmp_file) + size = os.path.getsize(tmp_file) basename = os.path.basename(tmp_file) bundle = open(tmp_file, 'r') @@ -112,12 +128,11 @@ def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port): for header, value in headers.iteritems(): conn.putheader(header, value) conn.endheaders() - + chunk = bundle.read(CHUNK_SIZE) while chunk: conn.send(chunk) chunk = bundle.read(CHUNK_SIZE) - res = conn.getresponse() #FIXME(sirp): should this be 201 Created? @@ -126,6 +141,7 @@ def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port): finally: bundle.close() + def get_sr_path(session): sr_ref = find_sr(session) @@ -156,5 +172,6 @@ def find_sr(session): if __name__ == '__main__': - XenAPIPlugin.dispatch({'put_vdis': put_vdis, - 'copy_kernel_vdi': copy_kernel_vdi}) + XenAPIPlugin.dispatch({'put_vdis': put_vdis, + 'copy_kernel_vdi': copy_kernel_vdi, + 'remove_kernel_ramdisk': remove_kernel_ramdisk}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore index 8ee2f748d710..d0313b4ed149 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore @@ -43,34 +43,37 @@ SECTOR_SIZE = 512 MBR_SIZE_SECTORS = 63 MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE -def is_vdi_pv(session,args): + +def is_vdi_pv(session, args): logging.debug("Checking wheter VDI has PV kernel") vdi = exists(args, 'vdi-ref') - pv=with_vdi_in_dom0(session, vdi, False, + pv = with_vdi_in_dom0(session, vdi, False, lambda dev: _is_vdi_pv('/dev/%s' % dev)) if pv: return 'true' else: return 'false' + def _is_vdi_pv(dest): - logging.debug("Running pygrub against %s",dest) - output=os.popen('pygrub -qn %s' % dest) - pv=False + logging.debug("Running pygrub against %s", dest) + output = os.popen('pygrub -qn %s' % dest) + pv = False for line in output.readlines(): #try to find kernel string - m=re.search('(?<=kernel:)/.*(?:>)',line) + m = re.search('(?<=kernel:)/.*(?:>)', line) if m: - if m.group(0).find('xen')!=-1: - pv=True - logging.debug("PV:%d",pv) - return pv - + if m.group(0).find('xen') != -1: + pv = True + logging.debug("PV:%d", pv) + return pv + + def get_vdi(session, args): src_url = exists(args, 'src_url') username = exists(args, 'username') password = exists(args, 'password') - raw_image=validate_bool(args, 'raw', 'false') + raw_image = validate_bool(args, 'raw', 'false') add_partition = validate_bool(args, 'add_partition', 'false') (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) sr = find_sr(session) @@ -88,16 +91,17 @@ def get_vdi(session, args): vdi = create_vdi(session, sr, src_url, vdi_size, False) with_vdi_in_dom0(session, vdi, False, lambda dev: get_vdi_(proto, netloc, url_path, - username, password, add_partition,raw_image, + username, password, + add_partition, raw_image, virtual_size, '/dev/%s' % dev)) return session.xenapi.VDI.get_uuid(vdi) -def get_vdi_(proto, netloc, url_path, username, password, add_partition,raw_image, - virtual_size, dest): +def get_vdi_(proto, netloc, url_path, username, password, + add_partition, raw_image, virtual_size, dest): - #Salvatore: vdi should not be partitioned for raw images - if (add_partition and not raw_image): + #vdi should not be partitioned for raw images + if add_partition and not raw_image: write_partition(virtual_size, dest) offset = 
(add_partition and not raw_image and MBR_SIZE_BYTES) or 0 @@ -144,7 +148,7 @@ def get_kernel(session, args): password = exists(args, 'password') (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) - + dest = os.path.join(KERNEL_DIR, url_path[1:]) # Paranoid check against people using ../ to do rude things. @@ -154,8 +158,8 @@ def get_kernel(session, args): dirname = os.path.dirname(dest) try: os.makedirs(dirname) - except os.error, e: - if e.errno != errno.EEXIST: + except os.error, e: + if e.errno != errno.EEXIST: raise if not os.path.isdir(dirname): raise Exception('Cannot make directory %s', dirname) @@ -248,5 +252,5 @@ def download_all(response, length, dest_file, offset): if __name__ == '__main__': XenAPIPlugin.dispatch({'get_vdi': get_vdi, - 'get_kernel': get_kernel, + 'get_kernel': get_kernel, 'is_vdi_pv': is_vdi_pv}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py index 695bf3448d69..a35ccd6ab46a 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py @@ -36,7 +36,15 @@ pluginlib.configure_logging("xenstore") def jsonify(fnc): def wrapper(*args, **kwargs): - return json.dumps(fnc(*args, **kwargs)) + ret = fnc(*args, **kwargs) + try: + json.loads(ret) + except ValueError: + # Value should already be JSON-encoded, but some operations + # may write raw sting values; this will catch those and + # properly encode them. + ret = json.dumps(ret) + return ret return wrapper diff --git a/setup.py b/setup.py index e3c45ce3e009..4e25f43ed18f 100644 --- a/setup.py +++ b/setup.py @@ -85,9 +85,13 @@ setup(name='nova', packages=find_packages(exclude=['bin', 'smoketests']), include_package_data=True, test_suite='nose.collector', - scripts=['bin/nova-api', + scripts=['bin/nova-ajax-console-proxy', + 'bin/nova-api', + 'bin/nova-combined', 'bin/nova-compute', + 'bin/nova-console', 'bin/nova-dhcpbridge', + 'bin/nova-direct-api', 'bin/nova-import-canonical-imagestore', 'bin/nova-instancemonitor', 'bin/nova-logspool', @@ -96,5 +100,6 @@ setup(name='nova', 'bin/nova-objectstore', 'bin/nova-scheduler', 'bin/nova-spoolsentry', + 'bin/stack', 'bin/nova-volume', 'tools/nova-debug'])
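
Reference sketch (not part of the patch): the LimiterTest cases added in nova/tests/api/openstack/test_common.py above pin down the behaviour of a pagination helper roughly like the one below. This is a minimal illustration inferred only from those test expectations ('offset'/'limit' GET parameters on a webob Request, a default and maximum page size of 1000, and blank or non-numeric values falling back to the defaults); it is not the actual nova.api.openstack.common.limited implementation, which lies outside this patch.

    def limited(items, request, max_limit=1000):
        """Return a slice of 'items' based on 'offset'/'limit' GET params.

        Missing, blank or non-numeric values fall back to offset=0 and
        limit=max_limit; a limit of 0 or anything above max_limit is
        clamped to max_limit.

        """
        def _get_int_param(name):
            # 'request' is assumed to be a webob Request, as in the tests.
            try:
                value = int(request.GET.get(name, 0))
            except ValueError:
                # Blank or malformed values fall back to the default.
                value = 0
            return max(value, 0)

        offset = _get_int_param('offset')
        limit = _get_int_param('limit')
        if limit == 0 or limit > max_limit:
            limit = max_limit
        return items[offset:offset + limit]

Against the fixtures used in the tests, for example, limited(range(10000), Request.blank('/?limit=3000')) would return the first 1000 items, matching test_limiter_limit_over_max.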