diff --git a/.bzrignore b/.bzrignore index d81a7d829df7..b271561a3564 100644 --- a/.bzrignore +++ b/.bzrignore @@ -12,3 +12,4 @@ CA/openssl.cnf CA/serial* CA/newcerts/*.pem CA/private/cakey.pem +nova/vcsversion.py diff --git a/.mailmap b/.mailmap index 010678569a24..2af2d7cd9b63 100644 --- a/.mailmap +++ b/.mailmap @@ -30,3 +30,4 @@ + diff --git a/Authors b/Authors index 639e68a59e10..47101e272a48 100644 --- a/Authors +++ b/Authors @@ -3,6 +3,7 @@ Anne Gentle Anthony Young Antony Messerli Armando Migliaccio +Chiradeep Vittal Chris Behrens Chmouel Boudjnah Cory Wright @@ -22,6 +23,7 @@ Jonathan Bryce Josh Kearney Joshua McKenty Justin Santa Barbara +Ken Pepple Matt Dietz Michael Gundlach Monty Taylor @@ -39,4 +41,3 @@ Trey Morris Vishvananda Ishaya Youcef Laribi Zhixue Wu - diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 000000000000..15cd6cb76b93 --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/bin/nova-api-paste b/bin/nova-api-paste new file mode 100755 index 000000000000..419f0bbdc29c --- /dev/null +++ b/bin/nova-api-paste @@ -0,0 +1,109 @@ +#!/usr/bin/env python +# pylint: disable-msg=C0103 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for Nova API.""" + +import gettext +import os +import sys + +from paste import deploy + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+gettext.install('nova', unicode=1)
+
+from nova import flags
+from nova import log as logging
+from nova import wsgi
+
+LOG = logging.getLogger('nova.api')
+LOG.setLevel(logging.DEBUG)
+LOG.addHandler(logging.StreamHandler())
+
+FLAGS = flags.FLAGS
+
+API_ENDPOINTS = ['ec2', 'openstack']
+
+
+def load_configuration(paste_config):
+    """Load the paste configuration from the config file and return it."""
+    config = None
+    # Try each known name to get the global DEFAULTS, which will give ports
+    for name in API_ENDPOINTS:
+        try:
+            config = deploy.appconfig("config:%s" % paste_config, name=name)
+        except LookupError:
+            pass
+        if config:
+            verbose = config.get('verbose', None)
+            if verbose:
+                FLAGS.verbose = int(verbose) == 1
+            if FLAGS.verbose:
+                logging.getLogger().setLevel(logging.DEBUG)
+            return config
+    LOG.debug(_("Paste config at %s has no section for known apis"),
+              paste_config)
+    print _("Paste config at %s has no section for any known apis") % \
+        paste_config
+    sys.exit(1)
+
+
+def launch_api(paste_config_file, section, server, port, host):
+    """Launch an api server from the specified port and IP."""
+    LOG.debug(_("Launching %s api on %s:%s"), section, host, port)
+    app = deploy.loadapp('config:%s' % paste_config_file, name=section)
+    server.start(app, int(port), host)
+
+
+def run_app(paste_config_file):
+    LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file)
+    config = load_configuration(paste_config_file)
+    LOG.debug(_("Configuration: %r"), config)
+    server = wsgi.Server()
+    ip = config.get('host', '0.0.0.0')
+    for api in API_ENDPOINTS:
+        port = config.get("%s_port" % api, None)
+        if not port:
+            continue
+        host = config.get("%s_host" % api, ip)
+        launch_api(paste_config_file, api, server, port, host)
+    LOG.debug(_("All api servers launched, now waiting"))
+    server.wait()
+
+
+if __name__ == '__main__':
+    FLAGS(sys.argv)
+    configfiles = ['/etc/nova/nova-api.conf']
+    if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+        configfiles.insert(0,
+                           os.path.join(possible_topdir, 'etc',
+                                        'nova-api.conf'))
+    for configfile in configfiles:
+        if os.path.exists(configfile):
+            run_app(configfile)
+            break
+        else:
+            LOG.debug(_("Skipping missing configuration: %s"), configfile)
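The probing logic above is easiest to see with the config keys the script actually looks up. Here is a minimal standalone sketch of the same logic, with ConfigParser standing in for paste.deploy.appconfig; the ``ec2_port``/``ec2_host`` key names follow the ``"%s_port"``/``"%s_host"`` lookups in run_app, but the sample config itself is an illustrative assumption, not a shipped file::

  import ConfigParser
  import StringIO

  API_ENDPOINTS = ['ec2', 'openstack']

  SAMPLE = """\
  [DEFAULT]
  verbose = 1
  ec2_port = 8773
  ec2_host = 0.0.0.0
  openstack_port = 8774
  """

  parser = ConfigParser.SafeConfigParser()
  parser.readfp(StringIO.StringIO(SAMPLE))
  # paste.deploy merges the global DEFAULT section into every app section,
  # which is where the per-api ports come from
  config = dict(parser.defaults())

  for api in API_ENDPOINTS:
      port = config.get('%s_port' % api)
      if not port:
          continue  # section loads but has no port: this api stays disabled
      host = config.get('%s_host' % api, config.get('host', '0.0.0.0'))
      print 'would launch %s api on %s:%s' % (api, host, port)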
""" import gettext -import logging import os import sys @@ -39,6 +38,7 @@ gettext.install('nova', unicode=1) from nova import context from nova import db from nova import flags +from nova import log as logging from nova import rpc from nova import utils from nova.network import linux_net @@ -49,11 +49,13 @@ flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager') +LOG = logging.getLogger('nova.dhcpbridge') + def add_lease(mac, ip_address, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: - logging.debug("leasing ip") + LOG.debug(_("leasing ip")) network_manager = utils.import_object(FLAGS.network_manager) network_manager.lease_fixed_ip(context.get_admin_context(), mac, @@ -68,14 +70,14 @@ def add_lease(mac, ip_address, _hostname, _interface): def old_lease(mac, ip_address, hostname, interface): """Update just as add lease.""" - logging.debug("Adopted old lease or got a change of mac/hostname") + LOG.debug(_("Adopted old lease or got a change of mac/hostname")) add_lease(mac, ip_address, hostname, interface) def del_lease(mac, ip_address, _hostname, _interface): """Called when a lease expires.""" if FLAGS.fake_rabbit: - logging.debug("releasing ip") + LOG.debug(_("releasing ip")) network_manager = utils.import_object(FLAGS.network_manager) network_manager.release_fixed_ip(context.get_admin_context(), mac, @@ -100,6 +102,7 @@ def main(): flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile) utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) + logging.basicConfig() interface = os.environ.get('DNSMASQ_INTERFACE', 'br0') if int(os.environ.get('TESTING', '0')): FLAGS.fake_rabbit = True @@ -117,9 +120,9 @@ def main(): mac = argv[2] ip = argv[3] hostname = argv[4] - logging.debug("Called %s for mac %s with ip %s and " - "hostname %s on interface %s", - action, mac, ip, hostname, interface) + LOG.debug(_("Called %s for mac %s with ip %s and " + "hostname %s on interface %s"), + action, mac, ip, hostname, interface) globals()[action + '_lease'](mac, ip, hostname, interface) else: print init_leases(interface) diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor index 5dac3ffe608d..7dca0201451c 100755 --- a/bin/nova-instancemonitor +++ b/bin/nova-instancemonitor @@ -23,7 +23,6 @@ import gettext import os -import logging import sys from twisted.application import service @@ -37,19 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) +from nova import log as logging from nova import utils from nova import twistd from nova.compute import monitor +# TODO(todd): shouldn't this be done with flags? And what about verbose? logging.getLogger('boto').setLevel(logging.WARN) +LOG = logging.getLogger('nova.instancemonitor') + if __name__ == '__main__': utils.default_flagfile() twistd.serve(__file__) if __name__ == '__builtin__': - logging.warn('Starting instance monitor') + LOG.warn(_('Starting instance monitor')) # pylint: disable-msg=C0103 monitor = monitor.InstanceMonitor() diff --git a/bin/nova-logspool b/bin/nova-logspool new file mode 100644 index 000000000000..097459b12339 --- /dev/null +++ b/bin/nova-logspool @@ -0,0 +1,156 @@ +#!/usr/bin/env python + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
diff --git a/bin/nova-logspool b/bin/nova-logspool
new file mode 100644
index 000000000000..097459b12339
--- /dev/null
+++ b/bin/nova-logspool
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Tools for working with logs generated by nova components
+"""
+
+
+import json
+import os
+import re
+import sys
+
+
+class Request(object):
+    def __init__(self):
+        self.time = ""
+        self.host = ""
+        self.logger = ""
+        self.message = ""
+        self.trace = ""
+        self.env = ""
+        self.request_id = ""
+
+    def add_error_line(self, error_line):
+        self.time = " ".join(error_line.split(" ")[:3])
+        self.host = error_line.split(" ")[3]
+        self.logger = error_line.split("(")[1].split(" ")[0]
+        self.request_id = error_line.split("[")[1].split(" ")[0]
+        error_lines = error_line.split("#012")
+        self.message = self.clean_log_line(error_lines.pop(0))
+        self.trace = "\n".join([self.clean_trace(l) for l in error_lines])
+
+    def add_environment_line(self, env_line):
+        self.env = self.clean_env_line(env_line)
+
+    def clean_log_line(self, line):
+        """Remove log format for time, level, etc: split after context"""
+        return line.split('] ')[-1]
+
+    def clean_env_line(self, line):
+        """Also has an 'Environment: ' string in the message"""
+        return re.sub(r'^Environment: ', '', self.clean_log_line(line))
+
+    def clean_trace(self, line):
+        """trace has a different format, so split on TRACE:"""
+        return line.split('TRACE: ')[-1]
+
+    def to_dict(self):
+        return {'traceback': self.trace, 'message': self.message,
+                'host': self.host, 'env': self.env, 'logger': self.logger,
+                'request_id': self.request_id}
+
+
+class LogReader(object):
+    def __init__(self, filename):
+        self.filename = filename
+        self._errors = {}
+        self.last_error = None
+
+    def process(self, spooldir):
+        with open(self.filename) as f:
+            line = f.readline()
+            while len(line) > 0:
+                parts = line.split(" ")
+                level = (len(parts) < 6) or parts[5]
+                if level == 'ERROR':
+                    self.handle_logged_error(line)
+                elif level == '[-]' and self.last_error:
+                    # twisted stack trace line
+                    clean_line = " ".join(line.split(" ")[6:])
+                    self.last_error.trace = self.last_error.trace + clean_line
+                else:
+                    self.last_error = None
+                line = f.readline()
+        self.update_spool(spooldir)
+
+    def handle_logged_error(self, line):
+        request_id = re.search(r' \[([A-Z0-9\-/]+)', line)
+        if not request_id:
+            raise Exception("Unable to parse request id from %s" % line)
+        request_id = request_id.group(1)
+        data = self._errors.get(request_id, Request())
+        if self.is_env_line(line):
+            data.add_environment_line(line)
+        elif self.is_error_line(line):
+            data.add_error_line(line)
+        else:
+            # possibly an error from twisted
+            data.add_error_line(line)
+        self.last_error = data
+        self._errors[request_id] = data
+
+    def is_env_line(self, line):
+        return re.search('Environment: ', line)
+
+    def is_error_line(self, line):
+        return re.search('raised', line)
+
+    def update_spool(self, directory):
+        processed_dir = "%s/processed" % directory
+        self._ensure_dir_exists(processed_dir)
+        for rid, value in self._errors.iteritems():
+            if not self.has_been_processed(processed_dir, rid):
+                with open("%s/%s" % (directory, rid), "w") as spool:
+                    spool.write(json.dumps(value.to_dict()))
+        self.flush_old_processed_spool(processed_dir)
+
+    def _ensure_dir_exists(self, d):
+        mkdir = False
+        try:
+            os.stat(d)
+        except OSError:
+            mkdir = True
+        if mkdir:
+            os.mkdir(d)
+
+    def has_been_processed(self, processed_dir, rid):
+        rv = False
+        try:
+            os.stat("%s/%s" % (processed_dir, rid))
+            rv = True
+        except OSError:
+            pass
+        return rv
+
+    def flush_old_processed_spool(self, processed_dir):
+        keys = self._errors.keys()
+        procs = os.listdir(processed_dir)
+        for p in procs:
+            if p not in keys:
+                # log has rotated and the old error won't be seen again
+                os.unlink("%s/%s" % (processed_dir, p))
+
+if __name__ == '__main__':
+    filename = '/var/log/nova.log'
+    spooldir = '/var/spool/nova'
+    if len(sys.argv) > 1:
+        filename = sys.argv[1]
+    if len(sys.argv) > 2:
+        spooldir = sys.argv[2]
+    LogReader(filename).process(spooldir)
diff --git a/bin/nova-manage b/bin/nova-manage
index 599e02a7ed0a..40f540e5b047 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -53,9 +53,10 @@ CLI interface for nova management.
 """
 
+import datetime
 import gettext
-import logging
 import os
+import re
 import sys
 import time
 
@@ -452,6 +453,61 @@ class NetworkCommands(object):
                     int(network_size), int(vlan_start), int(vpn_start))
 
+
+class ServiceCommands(object):
+    """Enable and disable running services"""
+
+    def list(self, host=None, service=None):
+        """Show a list of all running services. Filter by host & service name.
+        args: [host] [service]"""
+        ctxt = context.get_admin_context()
+        now = datetime.datetime.utcnow()
+        services = db.service_get_all(ctxt)
+        if host:
+            services = [s for s in services if s['host'] == host]
+        if service:
+            services = [s for s in services if s['binary'] == service]
+        for svc in services:
+            delta = now - (svc['updated_at'] or svc['created_at'])
+            alive = (delta.days == 0 and delta.seconds <= 15)
+            art = (alive and ":-)") or "XXX"
+            active = 'enabled'
+            if svc['disabled']:
+                active = 'disabled'
+            print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
+                                              active, art,
+                                              svc['updated_at'])
+
+    def enable(self, host, service):
+        """Enable scheduling for a service
+        args: host service"""
+        ctxt = context.get_admin_context()
+        svc = db.service_get_by_args(ctxt, host, service)
+        if not svc:
+            print "Unable to find service"
+            return
+        db.service_update(ctxt, svc['id'], {'disabled': False})
+
+    def disable(self, host, service):
+        """Disable scheduling for a service
+        args: host service"""
+        ctxt = context.get_admin_context()
+        svc = db.service_get_by_args(ctxt, host, service)
+        if not svc:
+            print "Unable to find service"
+            return
+        db.service_update(ctxt, svc['id'], {'disabled': True})
+
+
+class LogCommands(object):
+    def request(self, request_id, logfile='/var/log/nova.log'):
+        """Show all fields in the log for the given request. Assumes you
+        haven't changed the log format too much.
+ ARGS: request_id [logfile]""" + lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id)) + print re.sub('#012', "\n", "\n".join(lines)) + + CATEGORIES = [ ('user', UserCommands), ('project', ProjectCommands), @@ -459,7 +515,9 @@ CATEGORIES = [ ('shell', ShellCommands), ('vpn', VpnCommands), ('floating', FloatingIpCommands), - ('network', NetworkCommands)] + ('network', NetworkCommands), + ('service', ServiceCommands), + ('log', LogCommands)] def lazy_match(name, key_value_tuples): @@ -498,9 +556,6 @@ def main(): utils.default_flagfile() argv = FLAGS(sys.argv) - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - script_name = argv.pop(0) if len(argv) < 1: print script_name + " category action []" diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry new file mode 100644 index 000000000000..ab20268a9948 --- /dev/null +++ b/bin/nova-spoolsentry @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import base64 +import json +import logging +import os +import shutil +import sys +import urllib +import urllib2 +try: + import cPickle as pickle +except: + import pickle + + +class SpoolSentry(object): + def __init__(self, spool_dir, sentry_url, key=None): + self.spool_dir = spool_dir + self.sentry_url = sentry_url + self.key = key + + def process(self): + for fname in os.listdir(self.spool_dir): + if fname == "processed": + continue + try: + sourcefile = "%s/%s" % (self.spool_dir, fname) + with open(sourcefile) as f: + fdata = f.read() + data_from_json = json.loads(fdata) + data = self.build_data(data_from_json) + self.send_data(data) + destfile = "%s/processed/%s" % (self.spool_dir, fname) + shutil.move(sourcefile, destfile) + except: + logging.exception("Unable to upload record %s", fname) + raise + + def build_data(self, filejson): + env = {'SERVER_NAME': 'unknown', 'SERVER_PORT': '0000', + 'SCRIPT_NAME': '/unknown/', 'PATH_INFO': 'unknown'} + if filejson['env']: + env = json.loads(filejson['env']) + url = "http://%s:%s%s%s" % (env['SERVER_NAME'], env['SERVER_PORT'], + env['SCRIPT_NAME'], env['PATH_INFO']) + rv = {'logger': filejson['logger'], 'level': logging.ERROR, + 'server_name': filejson['host'], 'url': url, + 'message': filejson['message'], + 'traceback': filejson['traceback']} + rv['data'] = {} + if filejson['env']: + rv['data']['META'] = env + if filejson['request_id']: + rv['data']['request_id'] = filejson['request_id'] + return rv + + def send_data(self, data): + data = { + 'data': base64.b64encode(pickle.dumps(data).encode('zlib')), + 'key': self.key + } + req = urllib2.Request(self.sentry_url) + res = urllib2.urlopen(req, urllib.urlencode(data)) + if res.getcode() != 200: + raise Exception("Bad HTTP code: %s" % res.getcode()) + txt = res.read() + +if __name__ == '__main__': + sentryurl = 
'http://127.0.0.1/sentry/store/' + key = '' + spooldir = '/var/spool/nova' + if len(sys.argv) > 1: + sentryurl = sys.argv[1] + if len(sys.argv) > 2: + key = sys.argv[2] + if len(sys.argv) > 3: + spooldir = sys.argv[3] + SpoolSentry(spooldir, sentryurl, key).process() diff --git a/doc/.autogenerated b/doc/.autogenerated deleted file mode 100644 index 3a70f87808d1..000000000000 --- a/doc/.autogenerated +++ /dev/null @@ -1,97 +0,0 @@ -source/api/nova..adminclient.rst -source/api/nova..api.cloud.rst -source/api/nova..api.ec2.admin.rst -source/api/nova..api.ec2.apirequest.rst -source/api/nova..api.ec2.cloud.rst -source/api/nova..api.ec2.images.rst -source/api/nova..api.ec2.metadatarequesthandler.rst -source/api/nova..api.openstack.auth.rst -source/api/nova..api.openstack.backup_schedules.rst -source/api/nova..api.openstack.faults.rst -source/api/nova..api.openstack.flavors.rst -source/api/nova..api.openstack.images.rst -source/api/nova..api.openstack.servers.rst -source/api/nova..api.openstack.sharedipgroups.rst -source/api/nova..auth.dbdriver.rst -source/api/nova..auth.fakeldap.rst -source/api/nova..auth.ldapdriver.rst -source/api/nova..auth.manager.rst -source/api/nova..auth.signer.rst -source/api/nova..cloudpipe.pipelib.rst -source/api/nova..compute.disk.rst -source/api/nova..compute.instance_types.rst -source/api/nova..compute.manager.rst -source/api/nova..compute.monitor.rst -source/api/nova..compute.power_state.rst -source/api/nova..context.rst -source/api/nova..crypto.rst -source/api/nova..db.api.rst -source/api/nova..db.sqlalchemy.api.rst -source/api/nova..db.sqlalchemy.models.rst -source/api/nova..db.sqlalchemy.session.rst -source/api/nova..exception.rst -source/api/nova..fakerabbit.rst -source/api/nova..flags.rst -source/api/nova..image.service.rst -source/api/nova..manager.rst -source/api/nova..network.linux_net.rst -source/api/nova..network.manager.rst -source/api/nova..objectstore.bucket.rst -source/api/nova..objectstore.handler.rst -source/api/nova..objectstore.image.rst -source/api/nova..objectstore.stored.rst -source/api/nova..process.rst -source/api/nova..quota.rst -source/api/nova..rpc.rst -source/api/nova..scheduler.chance.rst -source/api/nova..scheduler.driver.rst -source/api/nova..scheduler.manager.rst -source/api/nova..scheduler.simple.rst -source/api/nova..server.rst -source/api/nova..service.rst -source/api/nova..test.rst -source/api/nova..tests.access_unittest.rst -source/api/nova..tests.api.fakes.rst -source/api/nova..tests.api.openstack.fakes.rst -source/api/nova..tests.api.openstack.test_api.rst -source/api/nova..tests.api.openstack.test_auth.rst -source/api/nova..tests.api.openstack.test_faults.rst -source/api/nova..tests.api.openstack.test_flavors.rst -source/api/nova..tests.api.openstack.test_images.rst -source/api/nova..tests.api.openstack.test_ratelimiting.rst -source/api/nova..tests.api.openstack.test_servers.rst -source/api/nova..tests.api.openstack.test_sharedipgroups.rst -source/api/nova..tests.api.test_wsgi.rst -source/api/nova..tests.api_integration.rst -source/api/nova..tests.api_unittest.rst -source/api/nova..tests.auth_unittest.rst -source/api/nova..tests.cloud_unittest.rst -source/api/nova..tests.compute_unittest.rst -source/api/nova..tests.declare_flags.rst -source/api/nova..tests.fake_flags.rst -source/api/nova..tests.flags_unittest.rst -source/api/nova..tests.network_unittest.rst -source/api/nova..tests.objectstore_unittest.rst -source/api/nova..tests.process_unittest.rst -source/api/nova..tests.quota_unittest.rst 
-source/api/nova..tests.real_flags.rst
-source/api/nova..tests.rpc_unittest.rst
-source/api/nova..tests.runtime_flags.rst
-source/api/nova..tests.scheduler_unittest.rst
-source/api/nova..tests.service_unittest.rst
-source/api/nova..tests.twistd_unittest.rst
-source/api/nova..tests.validator_unittest.rst
-source/api/nova..tests.virt_unittest.rst
-source/api/nova..tests.volume_unittest.rst
-source/api/nova..twistd.rst
-source/api/nova..utils.rst
-source/api/nova..validate.rst
-source/api/nova..virt.connection.rst
-source/api/nova..virt.fake.rst
-source/api/nova..virt.images.rst
-source/api/nova..virt.libvirt_conn.rst
-source/api/nova..virt.xenapi.rst
-source/api/nova..volume.driver.rst
-source/api/nova..volume.manager.rst
-source/api/nova..wsgi.rst
-source/api/autoindex.rst
diff --git a/doc/source/adminguide/distros/ubuntu.10.04.rst b/doc/source/adminguide/distros/ubuntu.10.04.rst
index ce368fab8ddc..9d856458a62e 100644
--- a/doc/source/adminguide/distros/ubuntu.10.04.rst
+++ b/doc/source/adminguide/distros/ubuntu.10.04.rst
@@ -16,13 +16,13 @@ Here's a script you can use to install (and then run) Nova on Ubuntu or Debian (
 Step 2: Install dependencies
 ----------------------------
 
-Nova requires rabbitmq for messaging and optionally you can use redis for storing state, so install these first.
+Nova requires rabbitmq for messaging, so install that first.
 
 *Note:* You must have sudo installed to run these commands as shown here.
 
 ::
 
-  sudo apt-get install rabbitmq-server redis-server
+  sudo apt-get install rabbitmq-server
 
 You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
 
@@ -31,11 +31,10 @@ If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gfl
 
 ::
 
-  sudo apt-get install python-twisted
-
-  sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 95C71FE2
-  sudo sh -c 'echo "deb http://ppa.launchpad.net/openstack/openstack-ppa/ubuntu lucid main" > /etc/apt/sources.list.d/openstackppa.list'
-  sudo apt-get update && sudo apt-get install python-gflags
+  sudo apt-get install python-software-properties
+  sudo add-apt-repository ppa:nova-core/trunk
+  sudo apt-get update
+  sudo apt-get install python-twisted python-gflags
 
 Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
 
diff --git a/doc/source/adminguide/getting.started.rst b/doc/source/adminguide/getting.started.rst
index 3e8073606920..0cadeb45eb02 100644
--- a/doc/source/adminguide/getting.started.rst
+++ b/doc/source/adminguide/getting.started.rst
@@ -76,11 +76,11 @@ External unix tools that are required:
 * aoetools and vblade-persist (if you use aoe-volumes)
 
 Nova uses cutting-edge versions of many packages. There are ubuntu packages in
-the nova-core ppa. You can use add this ppa to your sources list on an ubuntu
-machine with the following commands::
+the nova-core trunk ppa. You can add this ppa to your sources list on an
+ubuntu machine with the following commands::
 
   sudo apt-get install -y python-software-properties
-  sudo add-apt-repository ppa:nova-core/ppa
+  sudo add-apt-repository ppa:nova-core/trunk
 
 Recommended
 -----------
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
index fcb76c5e5383..a652e44b7d5c 100644
--- a/doc/source/adminguide/multi.node.install.rst
+++ b/doc/source/adminguide/multi.node.install.rst
@@ -46,12 +46,12 @@ Assumptions
 Step 1 Use apt-get to get the latest code
 -----------------------------------------
 
-1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/ppa.
+1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk.
 
 ::
 
   sudo apt-get install python-software-properties
-  sudo add-apt-repository ppa:nova-core/ppa
+  sudo add-apt-repository ppa:nova-core/trunk
 
 2. Run update.
 
@@ -77,21 +77,20 @@ Nova development has consolidated all .conf files to nova.conf as of November 20
 
 #. These need to be defined in the nova.conf configuration file::
 
-  --sql_connection=mysql://root:nova@$CC_ADDR/nova    # location of nova sql db
-  --s3_host=$CC_ADDR                                  # This is where nova is hosting the objectstore service, which
-                                                      # will contain the VM images and buckets
-  --rabbit_host=$CC_ADDR                              # This is where the rabbit AMQP messaging service is hosted
-  --cc_host=$CC_ADDR                                  # This is where the the nova-api service lives
-  --verbose                                           # Optional but very helpful during initial setup
-  --ec2_url=http://$CC_ADDR:8773/services/Cloud
-  --network_manager=nova.network.manager.FlatManager  # simple, no-vlan networking type
-
-  --fixed_range=                                      # ip network to use for VM guests, ex 192.168.2.64/26
-  --network_size=<# of addrs>                         # number of ip addrs to use for VM guests, ex 64
+  --sql_connection=mysql://root:nova@$CC_ADDR/nova    # location of nova sql db
+  --s3_host=$CC_ADDR                                  # This is where Nova is hosting the objectstore service, which
+                                                      # will contain the VM images and buckets
+  --rabbit_host=$CC_ADDR                              # This is where the rabbit AMQP messaging service is hosted
+  --cc_host=$CC_ADDR                                  # This is where the nova-api service lives
+  --verbose                                           # Optional but very helpful during initial setup
+  --ec2_url=http://$CC_ADDR:8773/services/Cloud
+  --network_manager=nova.network.manager.FlatManager  # simple, no-vlan networking type
+  --fixed_range=                                      # ip network to use for VM guests, ex 192.168.2.64/26
+  --network_size=<# of addrs>                         # number of ip addrs to use for VM guests, ex 64
 
 #. Create a nova group::
 
-  sudo addgroup nova
+   sudo addgroup nova
 
 The Nova config file should have its owner set to root:nova, and mode set to 0640, since they contain your MySQL server's root password.
 
diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst
deleted file mode 100644
index 6265b082be52..000000000000
--- a/doc/source/api/autoindex.rst
+++ /dev/null
@@ -1,99 +0,0 @@
-..
toctree:: - :maxdepth: 1 - - nova..adminclient.rst - nova..api.cloud.rst - nova..api.ec2.admin.rst - nova..api.ec2.apirequest.rst - nova..api.ec2.cloud.rst - nova..api.ec2.images.rst - nova..api.ec2.metadatarequesthandler.rst - nova..api.openstack.auth.rst - nova..api.openstack.backup_schedules.rst - nova..api.openstack.faults.rst - nova..api.openstack.flavors.rst - nova..api.openstack.images.rst - nova..api.openstack.servers.rst - nova..api.openstack.sharedipgroups.rst - nova..auth.dbdriver.rst - nova..auth.fakeldap.rst - nova..auth.ldapdriver.rst - nova..auth.manager.rst - nova..auth.signer.rst - nova..cloudpipe.pipelib.rst - nova..compute.disk.rst - nova..compute.instance_types.rst - nova..compute.manager.rst - nova..compute.monitor.rst - nova..compute.power_state.rst - nova..context.rst - nova..crypto.rst - nova..db.api.rst - nova..db.sqlalchemy.api.rst - nova..db.sqlalchemy.models.rst - nova..db.sqlalchemy.session.rst - nova..exception.rst - nova..fakerabbit.rst - nova..flags.rst - nova..image.service.rst - nova..manager.rst - nova..network.linux_net.rst - nova..network.manager.rst - nova..objectstore.bucket.rst - nova..objectstore.handler.rst - nova..objectstore.image.rst - nova..objectstore.stored.rst - nova..process.rst - nova..quota.rst - nova..rpc.rst - nova..scheduler.chance.rst - nova..scheduler.driver.rst - nova..scheduler.manager.rst - nova..scheduler.simple.rst - nova..server.rst - nova..service.rst - nova..test.rst - nova..tests.access_unittest.rst - nova..tests.api.fakes.rst - nova..tests.api.openstack.fakes.rst - nova..tests.api.openstack.test_api.rst - nova..tests.api.openstack.test_auth.rst - nova..tests.api.openstack.test_faults.rst - nova..tests.api.openstack.test_flavors.rst - nova..tests.api.openstack.test_images.rst - nova..tests.api.openstack.test_ratelimiting.rst - nova..tests.api.openstack.test_servers.rst - nova..tests.api.openstack.test_sharedipgroups.rst - nova..tests.api.test_wsgi.rst - nova..tests.api_integration.rst - nova..tests.api_unittest.rst - nova..tests.auth_unittest.rst - nova..tests.cloud_unittest.rst - nova..tests.compute_unittest.rst - nova..tests.declare_flags.rst - nova..tests.fake_flags.rst - nova..tests.flags_unittest.rst - nova..tests.network_unittest.rst - nova..tests.objectstore_unittest.rst - nova..tests.process_unittest.rst - nova..tests.quota_unittest.rst - nova..tests.real_flags.rst - nova..tests.rpc_unittest.rst - nova..tests.runtime_flags.rst - nova..tests.scheduler_unittest.rst - nova..tests.service_unittest.rst - nova..tests.twistd_unittest.rst - nova..tests.validator_unittest.rst - nova..tests.virt_unittest.rst - nova..tests.volume_unittest.rst - nova..twistd.rst - nova..utils.rst - nova..validate.rst - nova..virt.connection.rst - nova..virt.fake.rst - nova..virt.images.rst - nova..virt.libvirt_conn.rst - nova..virt.xenapi.rst - nova..volume.driver.rst - nova..volume.manager.rst - nova..wsgi.rst diff --git a/doc/source/api/nova..adminclient.rst b/doc/source/api/nova..adminclient.rst deleted file mode 100644 index 35fa839e1b1b..000000000000 --- a/doc/source/api/nova..adminclient.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..adminclient` Module -============================================================================== -.. 
automodule:: nova..adminclient - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.cloud.rst b/doc/source/api/nova..api.cloud.rst deleted file mode 100644 index 4138401858ea..000000000000 --- a/doc/source/api/nova..api.cloud.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.cloud` Module -============================================================================== -.. automodule:: nova..api.cloud - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.admin.rst b/doc/source/api/nova..api.ec2.admin.rst deleted file mode 100644 index 4e9ab308b4dd..000000000000 --- a/doc/source/api/nova..api.ec2.admin.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.ec2.admin` Module -============================================================================== -.. automodule:: nova..api.ec2.admin - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.apirequest.rst b/doc/source/api/nova..api.ec2.apirequest.rst deleted file mode 100644 index c17a2ff3ad29..000000000000 --- a/doc/source/api/nova..api.ec2.apirequest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.ec2.apirequest` Module -============================================================================== -.. automodule:: nova..api.ec2.apirequest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.cloud.rst b/doc/source/api/nova..api.ec2.cloud.rst deleted file mode 100644 index f6145c217f8c..000000000000 --- a/doc/source/api/nova..api.ec2.cloud.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.ec2.cloud` Module -============================================================================== -.. automodule:: nova..api.ec2.cloud - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.images.rst b/doc/source/api/nova..api.ec2.images.rst deleted file mode 100644 index 012d800e47fb..000000000000 --- a/doc/source/api/nova..api.ec2.images.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.ec2.images` Module -============================================================================== -.. automodule:: nova..api.ec2.images - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst deleted file mode 100644 index 75f5169e5f27..000000000000 --- a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.ec2.metadatarequesthandler` Module -============================================================================== -.. automodule:: nova..api.ec2.metadatarequesthandler - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.auth.rst b/doc/source/api/nova..api.openstack.auth.rst deleted file mode 100644 index 8c3f8f2da626..000000000000 --- a/doc/source/api/nova..api.openstack.auth.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.auth` Module -============================================================================== -.. 
automodule:: nova..api.openstack.auth - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.backup_schedules.rst b/doc/source/api/nova..api.openstack.backup_schedules.rst deleted file mode 100644 index 6b406f12db4b..000000000000 --- a/doc/source/api/nova..api.openstack.backup_schedules.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.backup_schedules` Module -============================================================================== -.. automodule:: nova..api.openstack.backup_schedules - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.faults.rst b/doc/source/api/nova..api.openstack.faults.rst deleted file mode 100644 index 7b25561f74b7..000000000000 --- a/doc/source/api/nova..api.openstack.faults.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.faults` Module -============================================================================== -.. automodule:: nova..api.openstack.faults - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.flavors.rst b/doc/source/api/nova..api.openstack.flavors.rst deleted file mode 100644 index 0deb724deba1..000000000000 --- a/doc/source/api/nova..api.openstack.flavors.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.flavors` Module -============================================================================== -.. automodule:: nova..api.openstack.flavors - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.images.rst b/doc/source/api/nova..api.openstack.images.rst deleted file mode 100644 index 82bd5f1e86a3..000000000000 --- a/doc/source/api/nova..api.openstack.images.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.images` Module -============================================================================== -.. automodule:: nova..api.openstack.images - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.servers.rst b/doc/source/api/nova..api.openstack.servers.rst deleted file mode 100644 index c36856ea2bc5..000000000000 --- a/doc/source/api/nova..api.openstack.servers.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.servers` Module -============================================================================== -.. automodule:: nova..api.openstack.servers - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.sharedipgroups.rst b/doc/source/api/nova..api.openstack.sharedipgroups.rst deleted file mode 100644 index 07632acc8e49..000000000000 --- a/doc/source/api/nova..api.openstack.sharedipgroups.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.sharedipgroups` Module -============================================================================== -.. automodule:: nova..api.openstack.sharedipgroups - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..auth.dbdriver.rst b/doc/source/api/nova..auth.dbdriver.rst deleted file mode 100644 index 7de68b6e08bc..000000000000 --- a/doc/source/api/nova..auth.dbdriver.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..auth.dbdriver` Module -============================================================================== -.. 
automodule:: nova..auth.dbdriver - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..auth.fakeldap.rst b/doc/source/api/nova..auth.fakeldap.rst deleted file mode 100644 index ca8a3ad4d560..000000000000 --- a/doc/source/api/nova..auth.fakeldap.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..auth.fakeldap` Module -============================================================================== -.. automodule:: nova..auth.fakeldap - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..auth.ldapdriver.rst b/doc/source/api/nova..auth.ldapdriver.rst deleted file mode 100644 index c444635228f8..000000000000 --- a/doc/source/api/nova..auth.ldapdriver.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..auth.ldapdriver` Module -============================================================================== -.. automodule:: nova..auth.ldapdriver - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..auth.manager.rst b/doc/source/api/nova..auth.manager.rst deleted file mode 100644 index bc5ce2ec31e8..000000000000 --- a/doc/source/api/nova..auth.manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..auth.manager` Module -============================================================================== -.. automodule:: nova..auth.manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..auth.signer.rst b/doc/source/api/nova..auth.signer.rst deleted file mode 100644 index aad824eada39..000000000000 --- a/doc/source/api/nova..auth.signer.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..auth.signer` Module -============================================================================== -.. automodule:: nova..auth.signer - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..cloudpipe.pipelib.rst b/doc/source/api/nova..cloudpipe.pipelib.rst deleted file mode 100644 index 054aaf484fe2..000000000000 --- a/doc/source/api/nova..cloudpipe.pipelib.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..cloudpipe.pipelib` Module -============================================================================== -.. automodule:: nova..cloudpipe.pipelib - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..compute.disk.rst b/doc/source/api/nova..compute.disk.rst deleted file mode 100644 index 6410af6f3899..000000000000 --- a/doc/source/api/nova..compute.disk.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.disk` Module -============================================================================== -.. automodule:: nova..compute.disk - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..compute.instance_types.rst b/doc/source/api/nova..compute.instance_types.rst deleted file mode 100644 index d206ff3a4a1f..000000000000 --- a/doc/source/api/nova..compute.instance_types.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.instance_types` Module -============================================================================== -.. automodule:: nova..compute.instance_types - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..compute.manager.rst b/doc/source/api/nova..compute.manager.rst deleted file mode 100644 index 33a337c39843..000000000000 --- a/doc/source/api/nova..compute.manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.manager` Module -============================================================================== -.. 
automodule:: nova..compute.manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..compute.monitor.rst b/doc/source/api/nova..compute.monitor.rst deleted file mode 100644 index a91169ecd309..000000000000 --- a/doc/source/api/nova..compute.monitor.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.monitor` Module -============================================================================== -.. automodule:: nova..compute.monitor - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..compute.power_state.rst b/doc/source/api/nova..compute.power_state.rst deleted file mode 100644 index 41b1080e5a0e..000000000000 --- a/doc/source/api/nova..compute.power_state.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.power_state` Module -============================================================================== -.. automodule:: nova..compute.power_state - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..context.rst b/doc/source/api/nova..context.rst deleted file mode 100644 index 9de1adb24827..000000000000 --- a/doc/source/api/nova..context.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..context` Module -============================================================================== -.. automodule:: nova..context - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..crypto.rst b/doc/source/api/nova..crypto.rst deleted file mode 100644 index af9f636344ab..000000000000 --- a/doc/source/api/nova..crypto.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..crypto` Module -============================================================================== -.. automodule:: nova..crypto - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..db.api.rst b/doc/source/api/nova..db.api.rst deleted file mode 100644 index 6d998fbb2f90..000000000000 --- a/doc/source/api/nova..db.api.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..db.api` Module -============================================================================== -.. automodule:: nova..db.api - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.api.rst b/doc/source/api/nova..db.sqlalchemy.api.rst deleted file mode 100644 index 76d0c1bd376f..000000000000 --- a/doc/source/api/nova..db.sqlalchemy.api.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..db.sqlalchemy.api` Module -============================================================================== -.. automodule:: nova..db.sqlalchemy.api - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.models.rst b/doc/source/api/nova..db.sqlalchemy.models.rst deleted file mode 100644 index 9c795d7f51f5..000000000000 --- a/doc/source/api/nova..db.sqlalchemy.models.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..db.sqlalchemy.models` Module -============================================================================== -.. automodule:: nova..db.sqlalchemy.models - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.session.rst b/doc/source/api/nova..db.sqlalchemy.session.rst deleted file mode 100644 index cbfd6416a888..000000000000 --- a/doc/source/api/nova..db.sqlalchemy.session.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..db.sqlalchemy.session` Module -============================================================================== -.. 
automodule:: nova..db.sqlalchemy.session - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..exception.rst b/doc/source/api/nova..exception.rst deleted file mode 100644 index 97ac6b752d27..000000000000 --- a/doc/source/api/nova..exception.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..exception` Module -============================================================================== -.. automodule:: nova..exception - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..fakerabbit.rst b/doc/source/api/nova..fakerabbit.rst deleted file mode 100644 index f1e27c2664fb..000000000000 --- a/doc/source/api/nova..fakerabbit.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..fakerabbit` Module -============================================================================== -.. automodule:: nova..fakerabbit - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..flags.rst b/doc/source/api/nova..flags.rst deleted file mode 100644 index 08165be44f46..000000000000 --- a/doc/source/api/nova..flags.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..flags` Module -============================================================================== -.. automodule:: nova..flags - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..image.service.rst b/doc/source/api/nova..image.service.rst deleted file mode 100644 index 78ef1eccadac..000000000000 --- a/doc/source/api/nova..image.service.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..image.service` Module -============================================================================== -.. automodule:: nova..image.service - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..manager.rst b/doc/source/api/nova..manager.rst deleted file mode 100644 index 576902491c7e..000000000000 --- a/doc/source/api/nova..manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..manager` Module -============================================================================== -.. automodule:: nova..manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..network.linux_net.rst b/doc/source/api/nova..network.linux_net.rst deleted file mode 100644 index 7af78d5ade7f..000000000000 --- a/doc/source/api/nova..network.linux_net.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..network.linux_net` Module -============================================================================== -.. automodule:: nova..network.linux_net - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..network.manager.rst b/doc/source/api/nova..network.manager.rst deleted file mode 100644 index 0ea70553325e..000000000000 --- a/doc/source/api/nova..network.manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..network.manager` Module -============================================================================== -.. automodule:: nova..network.manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..objectstore.bucket.rst b/doc/source/api/nova..objectstore.bucket.rst deleted file mode 100644 index 3bfdf639ccd4..000000000000 --- a/doc/source/api/nova..objectstore.bucket.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..objectstore.bucket` Module -============================================================================== -.. 
automodule:: nova..objectstore.bucket - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..objectstore.handler.rst b/doc/source/api/nova..objectstore.handler.rst deleted file mode 100644 index 0eb8c4efb497..000000000000 --- a/doc/source/api/nova..objectstore.handler.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..objectstore.handler` Module -============================================================================== -.. automodule:: nova..objectstore.handler - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..objectstore.image.rst b/doc/source/api/nova..objectstore.image.rst deleted file mode 100644 index fa4c971f119a..000000000000 --- a/doc/source/api/nova..objectstore.image.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..objectstore.image` Module -============================================================================== -.. automodule:: nova..objectstore.image - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..objectstore.stored.rst b/doc/source/api/nova..objectstore.stored.rst deleted file mode 100644 index 2b1d997a3fd0..000000000000 --- a/doc/source/api/nova..objectstore.stored.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..objectstore.stored` Module -============================================================================== -.. automodule:: nova..objectstore.stored - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..process.rst b/doc/source/api/nova..process.rst deleted file mode 100644 index 91eff8379070..000000000000 --- a/doc/source/api/nova..process.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..process` Module -============================================================================== -.. automodule:: nova..process - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..quota.rst b/doc/source/api/nova..quota.rst deleted file mode 100644 index 4140d95d66ac..000000000000 --- a/doc/source/api/nova..quota.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..quota` Module -============================================================================== -.. automodule:: nova..quota - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..rpc.rst b/doc/source/api/nova..rpc.rst deleted file mode 100644 index 5b2a9b8e2333..000000000000 --- a/doc/source/api/nova..rpc.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..rpc` Module -============================================================================== -.. automodule:: nova..rpc - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..scheduler.chance.rst b/doc/source/api/nova..scheduler.chance.rst deleted file mode 100644 index 89c074c8f6c7..000000000000 --- a/doc/source/api/nova..scheduler.chance.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..scheduler.chance` Module -============================================================================== -.. automodule:: nova..scheduler.chance - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..scheduler.driver.rst b/doc/source/api/nova..scheduler.driver.rst deleted file mode 100644 index 793ed9c7b3ae..000000000000 --- a/doc/source/api/nova..scheduler.driver.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..scheduler.driver` Module -============================================================================== -.. 
automodule:: nova..scheduler.driver - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..scheduler.manager.rst b/doc/source/api/nova..scheduler.manager.rst deleted file mode 100644 index d0fc7c4230db..000000000000 --- a/doc/source/api/nova..scheduler.manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..scheduler.manager` Module -============================================================================== -.. automodule:: nova..scheduler.manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..scheduler.simple.rst b/doc/source/api/nova..scheduler.simple.rst deleted file mode 100644 index dacc2cf3088c..000000000000 --- a/doc/source/api/nova..scheduler.simple.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..scheduler.simple` Module -============================================================================== -.. automodule:: nova..scheduler.simple - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..server.rst b/doc/source/api/nova..server.rst deleted file mode 100644 index 7cb2cfa5465d..000000000000 --- a/doc/source/api/nova..server.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..server` Module -============================================================================== -.. automodule:: nova..server - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..service.rst b/doc/source/api/nova..service.rst deleted file mode 100644 index 2d2dfcf2e52a..000000000000 --- a/doc/source/api/nova..service.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..service` Module -============================================================================== -.. automodule:: nova..service - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..test.rst b/doc/source/api/nova..test.rst deleted file mode 100644 index a6bdb6f1fffa..000000000000 --- a/doc/source/api/nova..test.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..test` Module -============================================================================== -.. automodule:: nova..test - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.access_unittest.rst b/doc/source/api/nova..tests.access_unittest.rst deleted file mode 100644 index 89554e430a4f..000000000000 --- a/doc/source/api/nova..tests.access_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.access_unittest` Module -============================================================================== -.. automodule:: nova..tests.access_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.fakes.rst b/doc/source/api/nova..tests.api.fakes.rst deleted file mode 100644 index 5728b18f3616..000000000000 --- a/doc/source/api/nova..tests.api.fakes.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.fakes` Module -============================================================================== -.. automodule:: nova..tests.api.fakes - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.fakes.rst b/doc/source/api/nova..tests.api.openstack.fakes.rst deleted file mode 100644 index 4a9ff593899f..000000000000 --- a/doc/source/api/nova..tests.api.openstack.fakes.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.fakes` Module -============================================================================== -.. 
automodule:: nova..tests.api.openstack.fakes - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_api.rst b/doc/source/api/nova..tests.api.openstack.test_api.rst deleted file mode 100644 index 68106d221f83..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_api.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_api` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_api - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_auth.rst b/doc/source/api/nova..tests.api.openstack.test_auth.rst deleted file mode 100644 index 9f0011669a88..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_auth.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_auth` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_auth - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_faults.rst b/doc/source/api/nova..tests.api.openstack.test_faults.rst deleted file mode 100644 index b839ae8a3c77..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_faults.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_faults` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_faults - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_flavors.rst b/doc/source/api/nova..tests.api.openstack.test_flavors.rst deleted file mode 100644 index 471fac56ec84..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_flavors.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_flavors` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_flavors - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_images.rst b/doc/source/api/nova..tests.api.openstack.test_images.rst deleted file mode 100644 index 57ae93c8c63e..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_images.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_images` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_images - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst deleted file mode 100644 index 9a857f795941..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_ratelimiting` Module -============================================================================== -.. 
automodule:: nova..tests.api.openstack.test_ratelimiting - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_servers.rst b/doc/source/api/nova..tests.api.openstack.test_servers.rst deleted file mode 100644 index ea602e6ab16c..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_servers.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_servers` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_servers - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst b/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst deleted file mode 100644 index 1fad49147d62..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_sharedipgroups` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_sharedipgroups - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.test_wsgi.rst b/doc/source/api/nova..tests.api.test_wsgi.rst deleted file mode 100644 index 8e79caa4dd73..000000000000 --- a/doc/source/api/nova..tests.api.test_wsgi.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.test_wsgi` Module -============================================================================== -.. automodule:: nova..tests.api.test_wsgi - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api_integration.rst b/doc/source/api/nova..tests.api_integration.rst deleted file mode 100644 index fd217acf73c0..000000000000 --- a/doc/source/api/nova..tests.api_integration.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api_integration` Module -============================================================================== -.. automodule:: nova..tests.api_integration - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api_unittest.rst b/doc/source/api/nova..tests.api_unittest.rst deleted file mode 100644 index 44a65d48c463..000000000000 --- a/doc/source/api/nova..tests.api_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api_unittest` Module -============================================================================== -.. automodule:: nova..tests.api_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.auth_unittest.rst b/doc/source/api/nova..tests.auth_unittest.rst deleted file mode 100644 index 5805dcf38b8e..000000000000 --- a/doc/source/api/nova..tests.auth_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.auth_unittest` Module -============================================================================== -.. automodule:: nova..tests.auth_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.cloud_unittest.rst b/doc/source/api/nova..tests.cloud_unittest.rst deleted file mode 100644 index d2ca3b013579..000000000000 --- a/doc/source/api/nova..tests.cloud_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.cloud_unittest` Module -============================================================================== -.. 
automodule:: nova..tests.cloud_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.compute_unittest.rst b/doc/source/api/nova..tests.compute_unittest.rst deleted file mode 100644 index 6a30bf7441f7..000000000000 --- a/doc/source/api/nova..tests.compute_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.compute_unittest` Module -============================================================================== -.. automodule:: nova..tests.compute_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.declare_flags.rst b/doc/source/api/nova..tests.declare_flags.rst deleted file mode 100644 index 524e72e9133d..000000000000 --- a/doc/source/api/nova..tests.declare_flags.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.declare_flags` Module -============================================================================== -.. automodule:: nova..tests.declare_flags - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.fake_flags.rst b/doc/source/api/nova..tests.fake_flags.rst deleted file mode 100644 index a8dc3df36efe..000000000000 --- a/doc/source/api/nova..tests.fake_flags.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.fake_flags` Module -============================================================================== -.. automodule:: nova..tests.fake_flags - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.flags_unittest.rst b/doc/source/api/nova..tests.flags_unittest.rst deleted file mode 100644 index 61087e683336..000000000000 --- a/doc/source/api/nova..tests.flags_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.flags_unittest` Module -============================================================================== -.. automodule:: nova..tests.flags_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.network_unittest.rst b/doc/source/api/nova..tests.network_unittest.rst deleted file mode 100644 index df057d813efb..000000000000 --- a/doc/source/api/nova..tests.network_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.network_unittest` Module -============================================================================== -.. automodule:: nova..tests.network_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.objectstore_unittest.rst b/doc/source/api/nova..tests.objectstore_unittest.rst deleted file mode 100644 index 0ae252f049c7..000000000000 --- a/doc/source/api/nova..tests.objectstore_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.objectstore_unittest` Module -============================================================================== -.. automodule:: nova..tests.objectstore_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.process_unittest.rst b/doc/source/api/nova..tests.process_unittest.rst deleted file mode 100644 index 30d1e129c942..000000000000 --- a/doc/source/api/nova..tests.process_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.process_unittest` Module -============================================================================== -.. 
automodule:: nova..tests.process_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.quota_unittest.rst b/doc/source/api/nova..tests.quota_unittest.rst deleted file mode 100644 index 6ab81310489d..000000000000 --- a/doc/source/api/nova..tests.quota_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.quota_unittest` Module -============================================================================== -.. automodule:: nova..tests.quota_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.real_flags.rst b/doc/source/api/nova..tests.real_flags.rst deleted file mode 100644 index e9c0d1abdc66..000000000000 --- a/doc/source/api/nova..tests.real_flags.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.real_flags` Module -============================================================================== -.. automodule:: nova..tests.real_flags - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.rpc_unittest.rst b/doc/source/api/nova..tests.rpc_unittest.rst deleted file mode 100644 index e6c7ceb2e04a..000000000000 --- a/doc/source/api/nova..tests.rpc_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.rpc_unittest` Module -============================================================================== -.. automodule:: nova..tests.rpc_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.runtime_flags.rst b/doc/source/api/nova..tests.runtime_flags.rst deleted file mode 100644 index 984e21199b4c..000000000000 --- a/doc/source/api/nova..tests.runtime_flags.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.runtime_flags` Module -============================================================================== -.. automodule:: nova..tests.runtime_flags - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.scheduler_unittest.rst b/doc/source/api/nova..tests.scheduler_unittest.rst deleted file mode 100644 index ae3a0661658d..000000000000 --- a/doc/source/api/nova..tests.scheduler_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.scheduler_unittest` Module -============================================================================== -.. automodule:: nova..tests.scheduler_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.service_unittest.rst b/doc/source/api/nova..tests.service_unittest.rst deleted file mode 100644 index c7c746d17e10..000000000000 --- a/doc/source/api/nova..tests.service_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.service_unittest` Module -============================================================================== -.. automodule:: nova..tests.service_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.twistd_unittest.rst b/doc/source/api/nova..tests.twistd_unittest.rst deleted file mode 100644 index ce88202e102a..000000000000 --- a/doc/source/api/nova..tests.twistd_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.twistd_unittest` Module -============================================================================== -.. 
automodule:: nova..tests.twistd_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.validator_unittest.rst b/doc/source/api/nova..tests.validator_unittest.rst deleted file mode 100644 index 980284327f89..000000000000 --- a/doc/source/api/nova..tests.validator_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.validator_unittest` Module -============================================================================== -.. automodule:: nova..tests.validator_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.virt_unittest.rst b/doc/source/api/nova..tests.virt_unittest.rst deleted file mode 100644 index 2189be41ef96..000000000000 --- a/doc/source/api/nova..tests.virt_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.virt_unittest` Module -============================================================================== -.. automodule:: nova..tests.virt_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.volume_unittest.rst b/doc/source/api/nova..tests.volume_unittest.rst deleted file mode 100644 index 791e192f5339..000000000000 --- a/doc/source/api/nova..tests.volume_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.volume_unittest` Module -============================================================================== -.. automodule:: nova..tests.volume_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..twistd.rst b/doc/source/api/nova..twistd.rst deleted file mode 100644 index d4145396db02..000000000000 --- a/doc/source/api/nova..twistd.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..twistd` Module -============================================================================== -.. automodule:: nova..twistd - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..utils.rst b/doc/source/api/nova..utils.rst deleted file mode 100644 index 1131d1080942..000000000000 --- a/doc/source/api/nova..utils.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..utils` Module -============================================================================== -.. automodule:: nova..utils - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..validate.rst b/doc/source/api/nova..validate.rst deleted file mode 100644 index 1d142f103259..000000000000 --- a/doc/source/api/nova..validate.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..validate` Module -============================================================================== -.. automodule:: nova..validate - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..virt.connection.rst b/doc/source/api/nova..virt.connection.rst deleted file mode 100644 index caf76676514e..000000000000 --- a/doc/source/api/nova..virt.connection.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..virt.connection` Module -============================================================================== -.. automodule:: nova..virt.connection - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..virt.fake.rst b/doc/source/api/nova..virt.fake.rst deleted file mode 100644 index 06ecdbf7d688..000000000000 --- a/doc/source/api/nova..virt.fake.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..virt.fake` Module -============================================================================== -.. 
automodule:: nova..virt.fake - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..virt.images.rst b/doc/source/api/nova..virt.images.rst deleted file mode 100644 index 4fdeb7af8dc7..000000000000 --- a/doc/source/api/nova..virt.images.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..virt.images` Module -============================================================================== -.. automodule:: nova..virt.images - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..virt.libvirt_conn.rst b/doc/source/api/nova..virt.libvirt_conn.rst deleted file mode 100644 index 7fb8aed5fe89..000000000000 --- a/doc/source/api/nova..virt.libvirt_conn.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..virt.libvirt_conn` Module -============================================================================== -.. automodule:: nova..virt.libvirt_conn - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..virt.xenapi.rst b/doc/source/api/nova..virt.xenapi.rst deleted file mode 100644 index 2e396bf06ccf..000000000000 --- a/doc/source/api/nova..virt.xenapi.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..virt.xenapi` Module -============================================================================== -.. automodule:: nova..virt.xenapi - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..volume.driver.rst b/doc/source/api/nova..volume.driver.rst deleted file mode 100644 index 51f5c07295c0..000000000000 --- a/doc/source/api/nova..volume.driver.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..volume.driver` Module -============================================================================== -.. automodule:: nova..volume.driver - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..volume.manager.rst b/doc/source/api/nova..volume.manager.rst deleted file mode 100644 index 91a192a8f5ef..000000000000 --- a/doc/source/api/nova..volume.manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..volume.manager` Module -============================================================================== -.. automodule:: nova..volume.manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..wsgi.rst b/doc/source/api/nova..wsgi.rst deleted file mode 100644 index 0bff1c332399..000000000000 --- a/doc/source/api/nova..wsgi.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..wsgi` Module -============================================================================== -.. automodule:: nova..wsgi - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/community.rst b/doc/source/community.rst index bfb93414ce1e..01ff5f055aa2 100644 --- a/doc/source/community.rst +++ b/doc/source/community.rst @@ -35,7 +35,8 @@ Contributing Code To contribute code, sign up for a Launchpad account and sign a contributor license agreement, available on the `OpenStack Wiki `_. Once the CLA is signed you -can contribute code through the Bazaar version control system which is related to your Launchpad account. +can contribute code through the Bazaar version control system which is related to your Launchpad +account. See the :doc:`devref/development.environment` page to get started. 
#openstack on Freenode IRC Network ---------------------------------- diff --git a/doc/source/conf.py b/doc/source/conf.py index 8f1b370ccbab..996dfb0a7ff5 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -60,10 +60,12 @@ copyright = u'2010, United States Government as represented by the Administrator # |version| and |release|, also used in various other places throughout the # built documents. # -# The short X.Y version. -version = '2011.1' +from nova import version as nova_version +#import nova.version # The full version, including alpha/beta/rc tags. -release = '2011.1-prerelease' +release = nova_version.version_string() +# The short X.Y version. +version = nova_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/devref/addmethod.openstackapi.rst b/doc/source/devref/addmethod.openstackapi.rst index 6484613df2b0..4baa46e20568 100644 --- a/doc/source/devref/addmethod.openstackapi.rst +++ b/doc/source/devref/addmethod.openstackapi.rst @@ -24,7 +24,7 @@ Routing To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information. -URLs are mapped to "action" methods on "controller" classes in nova/api/openstack/__init__/ApiRouter.__init__ . +URLs are mapped to "action" methods on "controller" classes in `nova/api/openstack/__init__/ApiRouter.__init__`. See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two: - mapper.connect() lets you map a single URL to a single action on a controller. @@ -33,9 +33,9 @@ See http://routes.groovie.org/manual.html for all syntax, but you'll probably ju Controllers and actions ----------------------- -Controllers live in nova/api/openstack, and inherit from nova.wsgi.Controller. +Controllers live in `nova/api/openstack`, and inherit from nova.wsgi.Controller. -See nova/api/openstack/servers.py for an example. +See `nova/api/openstack/servers.py` for an example. Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc. @@ -46,7 +46,7 @@ Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML If you define a new controller, you'll need to define a _serialization_metadata attribute on the class, to tell wsgi.Controller how to convert your dictionary to XML. It needs to know the singular form of any list tag (e.g. list contains tags) and which dictionary keys are to be XML attributes as opposed to subtags (e.g. instead of 4). -See nova/api/openstack/servers.py for an example. +See `nova/api/openstack/servers.py` for an example. Faults ------ diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 6344c5382ced..3de2e228748b 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -88,7 +88,12 @@ Here's how to get the latest code:: source .nova_venv/bin/activate ./run_tests.sh -And then you can do cleaning work or hack hack hack with a branched named cleaning:: +Then you can do cleaning work or hack hack hack with a branch named cleaning. + +Contributing Your Work +---------------------- + +Once your work is complete you may wish to contribute it to the project.
Add your name and email address to the `Authors` file, and also to the `.mailmap` file if you use multiple email addresses. Your contributions cannot be merged into trunk unless you are listed in the Authors file. Now, push the branch to Launchpad:: bzr push lp:~launchpaduserid/nova/cleaning diff --git a/doc/source/devref/rabbit.rst b/doc/source/devref/rabbit.rst index 423284a55c47..ae0bac49dfad 100644 --- a/doc/source/devref/rabbit.rst +++ b/doc/source/devref/rabbit.rst @@ -71,8 +71,8 @@ RPC Casts The diagram below shows the message flow during an rpc.cast operation: - 1. a Topic Publisher is instantiated to send the message request to the queuing system. - 2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. + 1. A Topic Publisher is instantiated to send the message request to the queuing system. + 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. .. image:: /images/rabbit/flow2.png :width: 60% diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst index 18368546bdc2..fb3969a433e6 100644 --- a/doc/source/nova.concepts.rst +++ b/doc/source/nova.concepts.rst @@ -75,7 +75,7 @@ Nova is built on a shared-nothing, messaging-based architecture. All of the majo To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.) - .. note:: The database schema is available on the `OpenStack Wiki _`. + .. note:: The database schema is available on the `OpenStack Wiki `_. Concept: Storage ---------------- @@ -129,12 +129,12 @@ The simplest networking mode. Each instance receives a fixed ip from the pool. Flat DHCP Mode ~~~~~~~~~~~~~~ -This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover. +This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode Nova does a bit more configuration; it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover. VLAN DHCP Mode ~~~~~~~~~~~~~~ -This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe `) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here `.
+This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, Nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe `) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here `. The following diagram illustrates how the communication that occurs between the vlan (the dashed box) and the public internet (represented by the two clouds) @@ -154,16 +154,16 @@ Concept: nova-manage -------------------- The nova-manage command is used to perform many essential functions for -administration and ongoing maintenance of nova, such as user creation, +administration and ongoing maintenance of Nova, such as user creation, vpn management, and much more. -See doc:`nova.manage` in the Administration Guide for more details. +See :doc:`nova.manage` in the Administration Guide for more details. Concept: Flags -------------- -Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. +Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. Concept: Plugins @@ -181,7 +181,7 @@ Concept: Plugins Concept: IPC/RPC ---------------- -Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/. +Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various Nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/. 
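A minimal sketch of the cast half of that RPC mechanism, assuming the rpc.cast(context, topic, msg) helper and the {"method": ..., "args": {...}} message format that nova/rpc.py uses in this era; the topic, method name, and instance id below are illustrative only::

    # Fire-and-forget messaging between Nova services (names illustrative).
    from nova import context
    from nova import rpc

    ctxt = context.get_admin_context()

    # cast() publishes the message to the exchange and returns immediately;
    # the Topic Consumer subscribed to 'compute' invokes the named method.
    rpc.cast(ctxt, 'compute',
             {"method": "reboot_instance",
              "args": {"instance_id": 42}})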
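The flags concept above reduces, underneath nova.flags, to plain python-gflags. A self-contained sketch of that pattern follows; the flag mirrors a real Nova setting but is defined here purely for illustration::

    # Define and parse a command line flag the way python-gflags does.
    import sys

    import gflags

    FLAGS = gflags.FLAGS
    gflags.DEFINE_string('network_manager',
                         'nova.network.manager.VlanManager',
                         'class name of the network manager to load')

    if __name__ == '__main__':
        FLAGS(sys.argv)  # consumes --network_manager=... from argv
        print FLAGS.network_manager

Flag files work the same way: each line of a flag file is parsed as if it had been passed on the command line.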
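Similarly, the controller pattern quoted earlier from addmethod.openstackapi.rst condenses to a short sketch; the class, resource, and fields are invented for illustration, with the _serialization_metadata shape following that document's description::

    # An invented OpenStack API controller following the documented pattern.
    from nova import wsgi


    class WidgetController(wsgi.Controller):
        # Tells wsgi.Controller how to serialize the returned dict as XML:
        # which keys of a <widget> element are rendered as attributes.
        _serialization_metadata = {
            'application/xml': {
                'attributes': {'widget': ['id', 'name']}}}

        def show(self, req, id):
            # The first two parameters are self and the WebOb request; 'id'
            # is pulled out of the URL by the mapper. Returning a dictionary
            # lets the base class serialize it to JSON or XML as requested.
            return {'widget': {'id': id, 'name': 'example'}}

    # Wired into the router with something like:
    #     mapper.resource('widget', 'widgets', controller=WidgetController())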
Concept: Fakes -------------- diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst index ae2b64d8a51d..fa5d96738ca2 100644 --- a/doc/source/quickstart.rst +++ b/doc/source/quickstart.rst @@ -59,38 +59,21 @@ different configurations (though for more complex setups you should see * HOST_IP * Default: address of first interface from the ifconfig command * Values: 127.0.0.1, or any other valid address - -TEST -~~~~ - -**Default**: 0 -**Values**: 1, run tests after checkout and initial setup - -USE_MYSQL -~~~~~~~~~ - -**Default**: 0, use sqlite3 -**Values**: 1, use mysql instead of sqlite3 - -MYSQL_PASS -~~~~~~~~~~ - -Only useful if $USE_MYSQL=1. - -**Default**: nova -**Values**: value of root password for mysql - -USE_LDAP -~~~~~~~~ - -**Default**: 0, use :mod:`nova.auth.dbdriver` -**Values**: 1, use :mod:`nova.auth.ldapdriver` - -LIBVIRT_TYPE -~~~~~~~~~~~~ - -**Default**: qemu -**Values**: uml, kvm +* TEST + * Default: 0 + * Values: 1, run tests after checkout and initial setup +* USE_MYSQL + * Default: 0, use sqlite3 + * Values: 1, use mysql instead of sqlite3 +* MYSQL_PASS (Only useful if $USE_MYSQL=1) + * Default: nova + * Values: value of root password for mysql +* USE_LDAP + * Default: 0, use :mod:`nova.auth.dbdriver` + * Values: 1, use :mod:`nova.auth.ldapdriver` +* LIBVIRT_TYPE + * Default: qemu + * Values: uml, kvm Usage ----- diff --git a/etc/nova-api.conf b/etc/nova-api.conf new file mode 100644 index 000000000000..c5dd0aaec450 --- /dev/null +++ b/etc/nova-api.conf @@ -0,0 +1,63 @@ +[DEFAULT] +verbose = 1 +ec2_port = 8773 +ec2_address = 0.0.0.0 +openstack_port = 8774 +openstack_address = 0.0.0.0 + +####### +# EC2 # +####### + +[composite:ec2] +use = egg:Paste#urlmap +/: ec2versions +/services: ec2api +/latest: ec2metadata +/200: ec2metadata +/1.0: ec2metadata + +[pipeline:ec2api] +pipeline = authenticate router authorizer ec2executor + +[filter:authenticate] +paste.filter_factory = nova.api.ec2:authenticate_factory + +[filter:router] +paste.filter_factory = nova.api.ec2:router_factory + +[filter:authorizer] +paste.filter_factory = nova.api.ec2:authorizer_factory + +[app:ec2executor] +paste.app_factory = nova.api.ec2:executor_factory + +[app:ec2versions] +paste.app_factory = nova.api.ec2:versions_factory + +[app:ec2metadata] +paste.app_factory = nova.api.ec2.metadatarequesthandler:metadata_factory + +############# +# Openstack # +############# + +[composite:openstack] +use = egg:Paste#urlmap +/: osversions +/v1.0: openstackapi + +[pipeline:openstackapi] +pipeline = auth ratelimit osapi + +[filter:auth] +paste.filter_factory = nova.api.openstack.auth:auth_factory + +[filter:ratelimit] +paste.filter_factory = nova.api.openstack.ratelimiting:ratelimit_factory + +[app:osapi] +paste.app_factory = nova.api.openstack:router_factory + +[app:osversions] +paste.app_factory = nova.api.openstack:versions_factory diff --git a/locale/nova.pot b/locale/nova.pot new file mode 100644 index 000000000000..a96411e33922 --- /dev/null +++ b/locale/nova.pot @@ -0,0 +1,2130 @@ +# Translations template for nova. +# Copyright (C) 2011 ORGANIZATION +# This file is distributed under the same license as the nova project. +# FIRST AUTHOR , 2011. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: nova 2011.1\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.4\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." 
+msgstr "" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out " +"for %d minutes." +msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "Instance quota exceeded. You can only run %s more instances of this type." 
+msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: 
nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: 
nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + diff --git a/nova/adminclient.py b/nova/adminclient.py index eabfce804400..b2609c8c40e2 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -23,10 +23,8 @@ import base64 import boto import httplib -from nova import flags from boto.ec2.regioninfo import RegionInfo -FLAGS = flags.FLAGS DEFAULT_CLC_URL = 'http://127.0.0.1:8773' DEFAULT_REGION = 'nova' @@ -198,8 +196,8 @@ class NovaAdminClient(object): self, clc_url=DEFAULT_CLC_URL, region=DEFAULT_REGION, - access_key=FLAGS.aws_access_key_id, - secret_key=FLAGS.aws_secret_access_key, + access_key=None, + secret_key=None, **kwargs): parts = self.split_clc_url(clc_url) diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 26fed847bb15..803470570064 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -24,7 +24,6 @@ Root WSGI middleware for all API controllers. :ec2api_subdomain: subdomain running the EC2 API (default: ec2) """ -import logging import routes import webob.dec diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 51d33bcc66e7..2fa1f636cec8 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -20,7 +20,7 @@ Starting point for routing EC2 requests. """ -import logging +import datetime import routes import webob import webob.dec @@ -29,6 +29,7 @@ import webob.exc from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import wsgi from nova.api.ec2 import apirequest from nova.api.ec2 import admin @@ -37,6 +38,7 @@ from nova.auth import manager FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.api") flags.DEFINE_boolean('use_forwarded_for', False, 'Treat X-Forwarded-For as the canonical remote address. 
' 'Only enable this if you have a sanitizing proxy.') @@ -52,10 +54,6 @@ flags.DEFINE_list('lockout_memcached_servers', None, 'Memcached servers or None for in process cache.') -_log = logging.getLogger("api") -_log.setLevel(logging.DEBUG) - - class API(wsgi.Middleware): """Routing for all EC2 API requests.""" @@ -64,6 +62,40 @@ class API(wsgi.Middleware): if FLAGS.use_lockout: self.application = Lockout(self.application) + @webob.dec.wsgify + def __call__(self, req): + rv = req.get_response(self.application) + self.log_request_completion(rv, req) + return rv + + def log_request_completion(self, response, request): + controller = request.environ.get('ec2.controller', None) + if controller: + controller = controller.__class__.__name__ + action = request.environ.get('ec2.action', None) + ctxt = request.environ.get('ec2.context', None) + seconds = 'X' + microseconds = 'X' + if ctxt: + delta = datetime.datetime.utcnow() - \ + ctxt.timestamp + seconds = delta.seconds + microseconds = delta.microseconds + LOG.info( + "%s.%ss %s %s %s %s:%s %s [%s] %s %s", + seconds, + microseconds, + request.remote_addr, + request.method, + request.path_info, + controller, + action, + response.status_int, + request.user_agent, + request.content_type, + response.content_type, + context=ctxt) + class Lockout(wsgi.Middleware): """Lockout for x minutes on y failed auths in a z minute period. @@ -98,7 +130,7 @@ class Lockout(wsgi.Middleware): failures_key = "authfailures-%s" % access_key failures = int(self.mc.get(failures_key) or 0) if failures >= FLAGS.lockout_attempts: - detail = "Too many failed authentications." + detail = _("Too many failed authentications.") raise webob.exc.HTTPForbidden(detail=detail) res = req.get_response(self.application) if res.status_int == 403: @@ -107,9 +139,9 @@ class Lockout(wsgi.Middleware): # NOTE(vish): To use incr, failures has to be a string. self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60) elif failures >= FLAGS.lockout_attempts: - _log.warn('Access key %s has had %d failed authentications' - ' and will be locked out for %d minutes.' % - (access_key, failures, FLAGS.lockout_minutes)) + LOG.warn(_('Access key %s has had %d failed authentications' + ' and will be locked out for %d minutes.'), + access_key, failures, FLAGS.lockout_minutes) self.mc.set(failures_key, str(failures), time=FLAGS.lockout_minutes * 60) return res @@ -142,8 +174,9 @@ class Authenticate(wsgi.Middleware): req.method, req.host, req.path) - except exception.Error, ex: - logging.debug(_("Authentication Failure: %s") % ex) + # Be explicit about which exceptions are 403; the rest bubble up as 500 + except (exception.NotFound, exception.NotAuthorized) as ex: + LOG.audit(_("Authentication Failure: %s"), str(ex)) raise webob.exc.HTTPForbidden() # Authenticated! @@ -154,6 +187,8 @@ class Authenticate(wsgi.Middleware): project=project, remote_address=remote_address) req.environ['ec2.context'] = ctxt + LOG.audit(_('Authenticated Request For %s:%s'), user.name, + project.name, context=req.environ['ec2.context']) return self.application @@ -189,9 +224,9 @@ class Router(wsgi.Middleware): except: raise webob.exc.HTTPBadRequest() - _log.debug(_('action: %s') % action) + LOG.debug(_('action: %s'), action) for key, value in args.items(): - _log.debug(_('arg: %s\t\tval: %s') % (key, value)) + LOG.debug(_('arg: %s\t\tval: %s'), key, value) # Success!
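The Lockout middleware above keeps a per-access-key failure count in memcached, expiring it after lockout_window minutes and stretching the expiry to lockout_minutes once the limit is hit. A minimal sketch of that sliding-window counter, with a plain in-memory dict standing in for the memcached get/set(time=...) calls (the class and its defaults are illustrative, not Nova's):

```python
import time


class FailureCounter(object):
    """In-memory stand-in for the memcached-backed failed-auth counter."""

    def __init__(self, attempts=5, window_minutes=15, lockout_minutes=15):
        self.attempts = attempts              # failures allowed per window
        self.window = window_minutes * 60    # counting window (seconds)
        self.lockout = lockout_minutes * 60  # lockout duration (seconds)
        self._store = {}                     # access_key -> (count, expires_at)

    def _failures(self, access_key):
        count, expires_at = self._store.get(access_key, (0, 0))
        return count if time.time() < expires_at else 0

    def is_locked_out(self, access_key):
        return self._failures(access_key) >= self.attempts

    def record_failure(self, access_key):
        count = self._failures(access_key) + 1
        # Once over the limit, extend the key's lifetime to the lockout
        # duration, mirroring mc.set(..., time=FLAGS.lockout_minutes * 60).
        ttl = self.lockout if count >= self.attempts else self.window
        self._store[access_key] = (count, time.time() + ttl)
        return count


counter = FailureCounter(attempts=2)
counter.record_failure('AKI_EXAMPLE')     # hypothetical access key
counter.record_failure('AKI_EXAMPLE')
assert counter.is_locked_out('AKI_EXAMPLE')
```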
req.environ['ec2.controller'] = controller @@ -263,6 +298,9 @@ class Authorizer(wsgi.Middleware): if self._matches_any_role(context, allowed_roles): return self.application else: + LOG.audit(_("Unauthorized request for controller=%s " + "and action=%s"), controller_name, action, + context=context) raise webob.exc.HTTPUnauthorized() def _matches_any_role(self, context, roles): @@ -294,27 +332,88 @@ class Executor(wsgi.Application): args = req.environ['ec2.action_args'] api_request = apirequest.APIRequest(controller, action) + result = None try: result = api_request.send(context, **args) - req.headers['Content-Type'] = 'text/xml' - return result + except exception.NotFound as ex: + LOG.info(_('NotFound raised: %s'), str(ex), context=context) + return self._error(req, context, type(ex).__name__, str(ex)) except exception.ApiError as ex: - + LOG.exception(_('ApiError raised: %s'), str(ex), context=context) if ex.code: - return self._error(req, ex.code, ex.message) + return self._error(req, context, ex.code, str(ex)) else: - return self._error(req, type(ex).__name__, ex.message) + return self._error(req, context, type(ex).__name__, str(ex)) - # TODO(vish): do something more useful with unknown exceptions except Exception as ex: - return self._error(req, type(ex).__name__, str(ex)) + extra = {'environment': req.environ} + LOG.exception(_('Unexpected error raised: %s'), str(ex), + extra=extra, context=context) + return self._error(req, + context, + 'UnknownError', + _('An unknown error has occurred. ' + 'Please try your request again.')) + else: + resp = webob.Response() + resp.status = 200 + resp.headers['Content-Type'] = 'text/xml' + resp.body = str(result) + return resp - def _error(self, req, code, message): - logging.error("%s: %s", code, message) + def _error(self, req, context, code, message): + LOG.error("%s: %s", code, message, context=context) resp = webob.Response() resp.status = 400 resp.headers['Content-Type'] = 'text/xml' resp.body = str('<?xml version="1.0"?>\n' - '<Response><Errors><Error><Code>%s</Code>' - '<Message>%s</Message></Error></Errors>' - '<RequestID>?</RequestID></Response>'
% (code, message)) + '<Response><Errors><Error><Code>%s</Code>' + '<Message>%s</Message></Error></Errors>' + '<RequestID>%s</RequestID></Response>' % + (code, message, context.request_id)) return resp + + +class Versions(wsgi.Application): + + @webob.dec.wsgify + def __call__(self, req): + """Respond to a request for all EC2 versions.""" + # available api versions + versions = [ + '1.0', + '2007-01-19', + '2007-03-01', + '2007-08-29', + '2007-10-10', + '2007-12-15', + '2008-02-01', + '2008-09-01', + '2009-04-04', + ] + return ''.join('%s\n' % v for v in versions) + + +def authenticate_factory(global_args, **local_args): + def authenticator(app): + return Authenticate(app) + return authenticator + + +def router_factory(global_args, **local_args): + def router(app): + return Router(app) + return router + + +def authorizer_factory(global_args, **local_args): + def authorizer(app): + return Authorizer(app) + return authorizer + + +def executor_factory(global_args, **local_args): + return Executor() + + +def versions_factory(global_args, **local_args): + return Versions() diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index fac01369eca1..758b612e80fb 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -24,9 +24,13 @@ import base64 from nova import db from nova import exception +from nova import log as logging from nova.auth import manager +LOG = logging.getLogger('nova.api.ec2.admin') + + def user_dict(user, base64_file=None): """Convert the user object to a result dict""" if user: @@ -75,17 +79,18 @@ class AdminController(object): return {'userSet': [user_dict(u) for u in manager.AuthManager().get_users()]} - def register_user(self, _context, name, **_kwargs): + def register_user(self, context, name, **_kwargs): """Creates a new user, and returns generated credentials.""" + LOG.audit(_("Creating new user: %s"), name, context=context) return user_dict(manager.AuthManager().create_user(name)) - def deregister_user(self, _context, name, **_kwargs): + def deregister_user(self, context, name, **_kwargs): """Deletes a single user (NOT undoable.) Should throw an exception if the user has instances, volumes, or buckets remaining. """ + LOG.audit(_("Deleting user: %s"), name, context=context) manager.AuthManager().delete_user(name) - return True def describe_roles(self, context, project_roles=True, **kwargs): @@ -105,15 +110,27 @@ class AdminController(object): operation='add', **kwargs): """Add or remove a role for a user and project.""" if operation == 'add': + if project: + LOG.audit(_("Adding role %s to user %s for project %s"), role, + user, project, context=context) + else: + LOG.audit(_("Adding sitewide role %s to user %s"), role, user, + context=context) manager.AuthManager().add_role(user, role, project) elif operation == 'remove': + if project: + LOG.audit(_("Removing role %s from user %s for project %s"), + role, user, project, context=context) + else: + LOG.audit(_("Removing sitewide role %s from user %s"), role, + user, context=context) manager.AuthManager().remove_role(user, role, project) else: - raise exception.ApiError('operation must be add or remove') + raise exception.ApiError(_('operation must be add or remove')) return True - def generate_x509_for_user(self, _context, name, project=None, **kwargs): + def generate_x509_for_user(self, context, name, project=None, **kwargs): """Generates and returns an x509 certificate for a single user. Is usually called from a client that will wrap this with access and secret key info, and return a zip file.
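The *_factory helpers just added follow the paste.deploy contract: an app factory returns a WSGI application, while a filter factory returns a one-argument function that wraps the next app in the pipeline, with the first filter listed ending up outermost. A dependency-free sketch of that contract under toy names (nothing here is a Nova class):

```python
def logging_filter_factory(global_conf, **local_conf):
    """Filter factory: returns a function that wraps an app."""
    def filter_(app):
        def middleware(environ, start_response):
            print('%s %s' % (environ['REQUEST_METHOD'], environ['PATH_INFO']))
            return app(environ, start_response)
        return middleware
    return filter_


def hello_app_factory(global_conf, **local_conf):
    """App factory: returns the terminal WSGI application."""
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello\n']
    return app


def build_pipeline(global_conf, app_factory, *filter_factories):
    """Roughly what a [pipeline] section does: wrap right to left."""
    app = app_factory(global_conf)
    for factory in reversed(filter_factories):
        app = factory(global_conf)(app)
    return app


app = build_pipeline({}, hello_app_factory, logging_filter_factory)
```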
@@ -122,6 +139,8 @@ class AdminController(object): project = name project = manager.AuthManager().get_project(project) user = manager.AuthManager().get_user(name) + LOG.audit(_("Getting x509 for user: %s on project: %s"), name, + project, context=context) return user_dict(user, base64.b64encode(project.get_credentials(user))) def describe_project(self, context, name, **kwargs): @@ -137,6 +156,8 @@ class AdminController(object): def register_project(self, context, name, manager_user, description=None, member_users=None, **kwargs): """Creates a new project""" + LOG.audit(_("Create project %s managed by %s"), name, manager_user, + context=context) return project_dict( manager.AuthManager().create_project( name, @@ -146,6 +167,7 @@ class AdminController(object): def deregister_project(self, context, name): """Permanently deletes a project.""" + LOG.audit(_("Delete project: %s"), name, context=context) manager.AuthManager().delete_project(name) return True @@ -159,11 +181,15 @@ class AdminController(object): **kwargs): """Add or remove a user from a project.""" if operation == 'add': + LOG.audit(_("Adding user %s to project %s"), user, project, + context=context) manager.AuthManager().add_to_project(user, project) elif operation == 'remove': + LOG.audit(_("Removing user %s from project %s"), user, project, + context=context) manager.AuthManager().remove_from_project(user, project) else: - raise exception.ApiError('operation must be add or remove') + raise exception.ApiError(_('operation must be add or remove')) return True # FIXME(vish): these host commands don't work yet, perhaps some of the diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index a90fbeb0c2b5..d0b417db1134 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -20,13 +20,13 @@ APIRequest class """ -import logging import re # TODO(termie): replace minidom with etree from xml.dom import minidom -_log = logging.getLogger("api") -_log.setLevel(logging.DEBUG) +from nova import log as logging + +LOG = logging.getLogger("nova.api.request") _c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') @@ -94,7 +94,7 @@ class APIRequest(object): except AttributeError: _error = _('Unsupported API request: controller = %s,' 'action = %s') % (self.controller, self.action) - _log.warning(_error) + LOG.exception(_error) # TODO: Raise custom exception, trap in apiserver, # and reraise as 400 error. raise Exception(_error) @@ -142,7 +142,7 @@ class APIRequest(object): response = xml.toxml() xml.unlink() - _log.debug(response) + LOG.debug(response) return response def _render_dict(self, xml, el, data): @@ -151,7 +151,7 @@ class APIRequest(object): val = data[key] el.appendChild(self._render_data(xml, key, val)) except: - _log.debug(data) + LOG.debug(data) raise def _render_data(self, xml, el_name, data): diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index a59131ab5670..17b9a14fb7f0 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -24,26 +24,27 @@ datastore. 
import base64 import datetime -import logging -import re +import IPy import os +from nova import compute from nova import context -import IPy - from nova import crypto from nova import db from nova import exception from nova import flags -from nova import quota +from nova import log as logging +from nova import network from nova import rpc from nova import utils -from nova.compute import api as compute_api +from nova import volume from nova.compute import instance_types FLAGS = flags.FLAGS -flags.DECLARE('storage_availability_zone', 'nova.volume.manager') +flags.DECLARE('service_down_time', 'nova.scheduler.driver') + +LOG = logging.getLogger("nova.api.cloud") InvalidInputException = exception.InvalidInputException @@ -71,16 +72,16 @@ def _gen_key(context, user_id, key_name): return {'private_key': private_key, 'fingerprint': fingerprint} -def ec2_id_to_internal_id(ec2_id): - """Convert an ec2 ID (i-[base 36 number]) to an internal id (int)""" +def ec2_id_to_id(ec2_id): + """Convert an ec2 ID (i-[base 36 number]) to an instance id (int)""" return int(ec2_id[2:], 36) -def internal_id_to_ec2_id(internal_id): - """Convert an internal ID (int) to an ec2 ID (i-[base 36 number])""" +def id_to_ec2_id(instance_id): + """Convert an instance ID (int) to an ec2 ID (i-[base 36 number])""" digits = [] - while internal_id != 0: - internal_id, remainder = divmod(internal_id, 36) + while instance_id != 0: + instance_id, remainder = divmod(instance_id, 36) digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder]) return "i-%s" % ''.join(reversed(digits)) @@ -91,10 +92,11 @@ class CloudController(object): sent to the other nodes. """ def __init__(self): - self.network_manager = utils.import_object(FLAGS.network_manager) self.image_service = utils.import_object(FLAGS.image_service) - self.compute_api = compute_api.ComputeAPI(self.network_manager, - self.image_service) + self.network_api = network.API() + self.volume_api = volume.API() + self.compute_api = compute.API(self.image_service, self.network_api, + self.volume_api) self.setup() def __str__(self): @@ -118,7 +120,8 @@ class CloudController(object): def _get_mpi_data(self, context, project_id): result = {} - for instance in self.compute_api.get_instances(context, project_id): + for instance in self.compute_api.get_all(context, + project_id=project_id): if instance['fixed_ip']: line = '%s slots=%d' % (instance['fixed_ip']['address'], instance['vcpus']) @@ -129,18 +132,9 @@ class CloudController(object): result[key] = [line] return result - def _trigger_refresh_security_group(self, context, security_group): - nodes = set([instance['host'] for instance in security_group.instances - if instance['host'] is not None]) - for node in nodes: - rpc.cast(context, - '%s.%s' % (FLAGS.compute_topic, node), - {"method": "refresh_security_group", - "args": {"security_group_id": security_group.id}}) - def get_metadata(self, address): ctxt = context.get_admin_context() - instance_ref = db.fixed_ip_get_instance(ctxt, address) + instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address) if instance_ref is None: return None mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) @@ -152,7 +146,7 @@ class CloudController(object): hostname = instance_ref['hostname'] floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) - ec2_id = internal_id_to_ec2_id(instance_ref['internal_id']) + ec2_id = id_to_ec2_id(instance_ref['id']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -188,9 +182,46 @@ class 
CloudController(object): return data def describe_availability_zones(self, context, **kwargs): + if ('zone_name' in kwargs and + 'verbose' in kwargs['zone_name'] and + context.is_admin): + return self._describe_availability_zones_verbose(context, + **kwargs) + else: + return self._describe_availability_zones(context, **kwargs) + + def _describe_availability_zones(self, context, **kwargs): return {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} + def _describe_availability_zones_verbose(self, context, **kwargs): + rv = {'availabilityZoneInfo': [{'zoneName': 'nova', + 'zoneState': 'available'}]} + + services = db.service_get_all(context) + now = datetime.datetime.utcnow() + hosts = [] + for host in [service['host'] for service in services]: + if not host in hosts: + hosts.append(host) + for host in hosts: + rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, + 'zoneState': ''}) + hsvcs = [service for service in services \ + if service['host'] == host] + for svc in hsvcs: + delta = now - (svc['updated_at'] or svc['created_at']) + alive = (delta.seconds <= FLAGS.service_down_time) + art = (alive and ":-)") or "XXX" + active = 'enabled' + if svc['disabled']: + active = 'disabled' + rv['availabilityZoneInfo'].append({ + 'zoneName': '| |- %s' % svc['binary'], + 'zoneState': '%s %s %s' % (active, art, + svc['updated_at'])}) + return rv + def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] @@ -208,6 +239,7 @@ class CloudController(object): FLAGS.cc_host, FLAGS.cc_port, FLAGS.ec2_suffix)}] + return {'regionInfo': regions} def describe_snapshots(self, context, @@ -243,6 +275,7 @@ class CloudController(object): return {'keypairsSet': result} def create_key_pair(self, context, key_name, **kwargs): + LOG.audit(_("Create key pair %s"), key_name, context=context) data = _gen_key(context, context.user.id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], @@ -250,6 +283,7 @@ class CloudController(object): # TODO(vish): when context is no longer an object, pass it here def delete_key_pair(self, context, key_name, **kwargs): + LOG.audit(_("Delete key pair %s"), key_name, context=context) try: db.key_pair_destroy(context, context.user.id, key_name) except exception.NotFound: @@ -356,6 +390,8 @@ class CloudController(object): return False def revoke_security_group_ingress(self, context, group_name, **kwargs): + LOG.audit(_("Revoke security group ingress %s"), group_name, + context=context) self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, @@ -373,7 +409,8 @@ class CloudController(object): match = False if match: db.security_group_rule_destroy(context, rule['id']) - self._trigger_refresh_security_group(context, security_group) + self.compute_api.trigger_security_group_rules_refresh(context, + security_group['id']) return True raise exception.ApiError(_("No rule for the specified parameters.")) @@ -382,6 +419,8 @@ class CloudController(object): # for these operations, so support for newer API versions # is sketchy. 
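The verbose zone listing above decides whether each service is alive by comparing its last heartbeat against service_down_time (declared, per the DECLARE earlier in this file, in nova.scheduler.driver). The same test as a standalone helper; the 60-second default is an assumption for the sketch, not the flag's real value:

```python
import datetime


def service_is_up(service, service_down_time=60, now=None):
    """True if the service row's last heartbeat is recent enough.

    updated_at is refreshed by the service's periodic check-in;
    created_at is the fallback for a service that never reported in.
    """
    now = now or datetime.datetime.utcnow()
    last_heartbeat = service['updated_at'] or service['created_at']
    # Compare whole timedeltas: the hunk above uses delta.seconds, which
    # silently drops delta.days, so a service dead for just over a day
    # can be reported alive again.
    return now - last_heartbeat <= datetime.timedelta(
        seconds=service_down_time)


now = datetime.datetime.utcnow()
fresh = {'updated_at': now - datetime.timedelta(seconds=10),
         'created_at': now - datetime.timedelta(days=3)}
assert service_is_up(fresh, now=now)
```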
def authorize_security_group_ingress(self, context, group_name, **kwargs): + LOG.audit(_("Authorize security group ingress %s"), group_name, + context=context) self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, @@ -396,7 +435,8 @@ class CloudController(object): security_group_rule = db.security_group_rule_create(context, values) - self._trigger_refresh_security_group(context, security_group) + self.compute_api.trigger_security_group_rules_refresh(context, + security_group['id']) return True @@ -418,6 +458,7 @@ class CloudController(object): return source_project_id def create_security_group(self, context, group_name, group_description): + LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): raise exception.ApiError(_('group %s already exists') % group_name) @@ -432,6 +473,7 @@ class CloudController(object): group_ref)]} def delete_security_group(self, context, group_name, **kwargs): + LOG.audit(_("Delete security group %s"), group_name, context=context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) @@ -439,10 +481,12 @@ class CloudController(object): return True def get_console_output(self, context, instance_id, **kwargs): + LOG.audit(_("Get console output for instance %s"), instance_id, + context=context) # instance_id is passed in as a list of instances ec2_id = instance_id[0] - internal_id = ec2_id_to_internal_id(ec2_id) - instance_ref = self.compute_api.get_instance(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + instance_ref = self.compute_api.get(context, instance_id) output = rpc.call(context, '%s.%s' % (FLAGS.compute_topic, instance_ref['host']), @@ -460,27 +504,22 @@ class CloudController(object): return self.compute_api.get_ajax_console(context, internal_id) def describe_volumes(self, context, volume_id=None, **kwargs): - if context.user.is_admin(): - volumes = db.volume_get_all(context) - else: - volumes = db.volume_get_all_by_project(context, context.project_id) - + volumes = self.volume_api.get_all(context) # NOTE(vish): volume_id is an optional list of volume ids to filter by. volumes = [self._format_volume(context, v) for v in volumes - if volume_id is None or v['ec2_id'] in volume_id] - + if volume_id is None or v['id'] in volume_id] return {'volumeSet': volumes} def _format_volume(self, context, volume): instance_ec2_id = None instance_data = None if volume.get('instance', None): - internal_id = volume['instance']['internal_id'] - instance_ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = volume['instance']['id'] + instance_ec2_id = id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} - v['volumeId'] = volume['ec2_id'] + v['volumeId'] = volume['id'] v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] @@ -507,95 +546,18 @@ class CloudController(object): return v def create_volume(self, context, size, **kwargs): - # check quota - if quota.allowed_volumes(context, 1, size) < 1: - logging.warn("Quota exceeeded for %s, tried to create %sG volume", - context.project_id, size) - raise quota.QuotaError("Volume quota exceeded. 
You cannot " - "create a volume of size %s" % size) - vol = {} - vol['size'] = size - vol['user_id'] = context.user.id - vol['project_id'] = context.project_id - vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" - vol['attach_status'] = "detached" - vol['display_name'] = kwargs.get('display_name') - vol['display_description'] = kwargs.get('display_description') - volume_ref = db.volume_create(context, vol) - - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "create_volume", - "args": {"topic": FLAGS.volume_topic, - "volume_id": volume_ref['id']}}) - + LOG.audit(_("Create volume of %s GB"), size, context=context) + volume = self.volume_api.create(context, size, + kwargs.get('display_name'), + kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. return {'volumeSet': [self._format_volume(context, dict(volume_ref))]} - def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume_ref = db.volume_get_by_ec2_id(context, volume_id) - if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise exception.ApiError(_("Invalid device specified: %s. " - "Example device: /dev/vdb") % device) - # TODO(vish): abstract status checking? - if volume_ref['status'] != "available": - raise exception.ApiError(_("Volume status must be available")) - if volume_ref['attach_status'] == "attached": - raise exception.ApiError(_("Volume is already attached")) - internal_id = ec2_id_to_internal_id(instance_id) - instance_ref = self.compute_api.get_instance(context, internal_id) - host = instance_ref['host'] - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "attach_volume", - "args": {"volume_id": volume_ref['id'], - "instance_id": instance_ref['id'], - "mountpoint": device}}) - return {'attachTime': volume_ref['attach_time'], - 'device': volume_ref['mountpoint'], - 'instanceId': instance_ref['id'], - 'requestId': context.request_id, - 'status': volume_ref['attach_status'], - 'volumeId': volume_ref['id']} - - def detach_volume(self, context, volume_id, **kwargs): - volume_ref = db.volume_get_by_ec2_id(context, volume_id) - instance_ref = db.volume_get_instance(context.elevated(), - volume_ref['id']) - if not instance_ref: - raise exception.ApiError(_("Volume isn't attached to anything!")) - # TODO(vish): abstract status checking? 
- if volume_ref['status'] == "available": - raise exception.ApiError(_("Volume is already detached")) - try: - host = instance_ref['host'] - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "detach_volume", - "args": {"instance_id": instance_ref['id'], - "volume_id": volume_ref['id']}}) - except exception.NotFound: - # If the instance doesn't exist anymore, - # then we need to call detach blind - db.volume_detached(context) - internal_id = instance_ref['internal_id'] - ec2_id = internal_id_to_ec2_id(internal_id) - return {'attachTime': volume_ref['attach_time'], - 'device': volume_ref['mountpoint'], - 'instanceId': internal_id, - 'requestId': context.request_id, - 'status': volume_ref['attach_status'], - 'volumeId': volume_ref['id']} - - def _convert_to_set(self, lst, label): - if lst == None or lst == []: - return None - if not isinstance(lst, list): - lst = [lst] - return [{label: x} for x in lst] + def delete_volume(self, context, volume_id, **kwargs): + self.volume_api.delete(context, volume_id) + return True def update_volume(self, context, volume_id, **kwargs): updatable_fields = ['display_name', 'display_description'] @@ -604,34 +566,65 @@ class CloudController(object): if field in kwargs: changes[field] = kwargs[field] if changes: - db.volume_update(context, volume_id, kwargs) + self.volume_api.update(context, volume_id, kwargs) return True - def describe_instances(self, context, **kwargs): - return self._format_describe_instances(context) + def attach_volume(self, context, volume_id, instance_id, device, **kwargs): + LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id, + instance_id, device, context=context) + self.compute_api.attach_volume(context, instance_id, volume_id, device) + volume = self.volume_api.get(context, volume_id) + return {'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id} - def _format_describe_instances(self, context): - return {'reservationSet': self._format_instances(context)} + def detach_volume(self, context, volume_id, **kwargs): + LOG.audit(_("Detach volume %s"), volume_id, context=context) + volume = self.volume_api.get(context, volume_id) + instance = self.compute_api.detach_volume(context, volume_id) + return {'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': id_to_ec2_id(instance['id']), + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id} + + def _convert_to_set(self, lst, label): + if lst == None or lst == []: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] + + def describe_instances(self, context, **kwargs): + return self._format_describe_instances(context, **kwargs) + + def _format_describe_instances(self, context, **kwargs): + return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): - i = self._format_instances(context, reservation_id) + i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] - def _format_instances(self, context, reservation_id=None): + def _format_instances(self, context, instance_id=None, **kwargs): reservations = {} - if reservation_id: - instances = db.instance_get_all_by_reservation(context, - reservation_id) + # NOTE(vish): instance_id is an optional list of ids to filter by + if instance_id:
instance_id = [ec2_id_to_id(x) for x in instance_id] + instances = [self.compute_api.get(context, x) for x in instance_id] else: - instances = self.compute_api.get_instances(context) + instances = self.compute_api.get_all(context, **kwargs) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: continue i = {} - internal_id = instance['internal_id'] - ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = instance['id'] + ec2_id = id_to_ec2_id(instance_id) i['instanceId'] = ec2_id i['imageId'] = instance['image_id'] i['instanceState'] = { @@ -684,8 +677,8 @@ class CloudController(object): ec2_id = None if (floating_ip_ref['fixed_ip'] and floating_ip_ref['fixed_ip']['instance']): - internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id'] - ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = floating_ip_ref['fixed_ip']['instance']['ec2_id'] + ec2_id = id_to_ec2_id(instance_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.user.is_admin(): @@ -696,69 +689,30 @@ class CloudController(object): return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): - # check quota - if quota.allowed_floating_ips(context, 1) < 1: - logging.warn(_("Quota exceeeded for %s, tried to allocate " - "address"), - context.project_id) - raise quota.QuotaError(_("Address quota exceeded. You cannot " - "allocate any more addresses")) - # NOTE(vish): We don't know which network host should get the ip - # when we allocate, so just send it to any one. This - # will probably need to move into a network supervisor - # at some point. - public_ip = rpc.call(context, - FLAGS.network_topic, - {"method": "allocate_floating_ip", - "args": {"project_id": context.project_id}}) + LOG.audit(_("Allocate address"), context=context) + public_ip = self.network_api.allocate_floating_ip(context) return {'addressSet': [{'publicIp': public_ip}]} def release_address(self, context, public_ip, **kwargs): - floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - # NOTE(vish): We don't know which network host should get the ip - # when we deallocate, so just send it to any one. This - # will probably need to move into a network supervisor - # at some point. - rpc.cast(context, - FLAGS.network_topic, - {"method": "deallocate_floating_ip", - "args": {"floating_address": floating_ip_ref['address']}}) + LOG.audit(_("Release address %s"), public_ip, context=context) + self.network_api.release_floating_ip(context, public_ip) return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): - internal_id = ec2_id_to_internal_id(instance_id) - instance_ref = self.compute_api.get_instance(context, internal_id) - fixed_address = db.instance_get_fixed_address(context, - instance_ref['id']) - floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - # NOTE(vish): Perhaps we should just pass this on to compute and - # let compute communicate with network. 
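Nearly every handler in this controller funnels ids through the pair of converters defined at the top of cloud.py: an integer instance id is rendered as base 36 behind an "i-" prefix, and parsed back with int(..., 36). The same algorithm restated standalone with a round-trip check:

```python
ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'


def id_to_ec2_id(instance_id):
    """Encode an integer instance id as an EC2-style 'i-xxxx' string."""
    digits = []
    while instance_id != 0:
        instance_id, remainder = divmod(instance_id, 36)
        digits.append(ALPHABET[remainder])
    return 'i-%s' % ''.join(reversed(digits))


def ec2_id_to_id(ec2_id):
    """Decode an 'i-xxxx' string back to the integer instance id."""
    return int(ec2_id[2:], 36)


assert id_to_ec2_id(123456) == 'i-2n9c'
assert ec2_id_to_id('i-2n9c') == 123456
# Caveat carried over from the original: id 0 encodes to a bare 'i-',
# and int('', 36) raises ValueError, so 0 does not round-trip.
```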
- network_topic = self.compute_api.get_network_topic(context, - internal_id) - rpc.cast(context, - network_topic, - {"method": "associate_floating_ip", - "args": {"floating_address": floating_ip_ref['address'], - "fixed_address": fixed_address}}) + LOG.audit(_("Associate address %s to instance %s"), public_ip, + instance_id, context=context) + instance_id = ec2_id_to_id(instance_id) + self.compute_api.associate_floating_ip(context, instance_id, public_ip) return {'associateResponse': ["Address associated."]} def disassociate_address(self, context, public_ip, **kwargs): - floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - # NOTE(vish): Get the topic from the host name of the network of - # the associated fixed ip. - if not floating_ip_ref.get('fixed_ip'): - raise exception.ApiError('Address is not associated.') - host = floating_ip_ref['fixed_ip']['network']['host'] - topic = db.queue_get_for(context, FLAGS.network_topic, host) - rpc.cast(context, - topic, - {"method": "disassociate_floating_ip", - "args": {"floating_address": floating_ip_ref['address']}}) + LOG.audit(_("Disassociate address %s"), public_ip, context=context) + self.network_api.disassociate_floating_ip(context, public_ip) return {'disassociateResponse': ["Address disassociated."]} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) - instances = self.compute_api.create_instances(context, + instances = self.compute_api.create(context, instance_types.get_by_type(kwargs.get('instance_type', None)), kwargs['image_id'], min_count=int(kwargs.get('min_count', max_count)), @@ -766,40 +720,43 @@ class CloudController(object): kernel_id=kwargs.get('kernel_id', None), ramdisk_id=kwargs.get('ramdisk_id'), display_name=kwargs.get('display_name'), - description=kwargs.get('display_description'), + display_description=kwargs.get('display_description'), key_name=kwargs.get('key_name'), user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), - generate_hostname=internal_id_to_ec2_id) + availability_zone=kwargs.get('placement', {}).get( + 'AvailabilityZone'), + generate_hostname=id_to_ec2_id) return self._format_run_instances(context, instances[0]['reservation_id']) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. 
instance_id is a kwarg so its name cannot be modified.""" - logging.debug("Going to start terminating instances") + LOG.debug(_("Going to start terminating instances")) for ec2_id in instance_id: - internal_id = ec2_id_to_internal_id(ec2_id) - self.compute_api.delete_instance(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + self.compute_api.delete(context, instance_id) return True def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" + LOG.audit(_("Reboot instance %r"), instance_id, context=context) for ec2_id in instance_id: - internal_id = ec2_id_to_internal_id(ec2_id) - self.compute_api.reboot(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + self.compute_api.reboot(context, instance_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - internal_id = ec2_id_to_internal_id(instance_id) - self.compute_api.rescue(context, internal_id) + instance_id = ec2_id_to_id(instance_id) + self.compute_api.rescue(context, instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - internal_id = ec2_id_to_internal_id(instance_id) - self.compute_api.unrescue(context, internal_id) + instance_id = ec2_id_to_id(instance_id) + self.compute_api.unrescue(context, instance_id) return True def update_instance(self, context, ec2_id, **kwargs): @@ -809,24 +766,8 @@ class CloudController(object): if field in kwargs: changes[field] = kwargs[field] if changes: - internal_id = ec2_id_to_internal_id(ec2_id) - inst = self.compute_api.get_instance(context, internal_id) - db.instance_update(context, inst['id'], kwargs) - return True - - def delete_volume(self, context, volume_id, **kwargs): - # TODO: return error if not authorized - volume_ref = db.volume_get_by_ec2_id(context, volume_id) - if volume_ref['status'] != "available": - raise exception.ApiError(_("Volume status must be available")) - now = datetime.datetime.utcnow() - db.volume_update(context, volume_ref['id'], {'status': 'deleting', - 'terminated_at': now}) - host = volume_ref['host'] - rpc.cast(context, - db.queue_get_for(context, FLAGS.volume_topic, host), - {"method": "delete_volume", - "args": {"volume_id": volume_ref['id']}}) + instance_id = ec2_id_to_id(ec2_id) + self.compute_api.update(context, instance_id, **kwargs) return True def describe_images(self, context, image_id=None, **kwargs): @@ -837,6 +778,7 @@ class CloudController(object): return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): + LOG.audit(_("De-registering image %s"), image_id, context=context) self.image_service.deregister(context, image_id) return {'imageId': image_id} @@ -844,7 +786,8 @@ class CloudController(object): if image_location is None and 'name' in kwargs: image_location = kwargs['name'] image_id = self.image_service.register(context, image_location) - logging.debug("Registered %s as %s" % (image_location, image_id)) + LOG.audit(_("Registered image %s with id %s"), image_location, + image_id, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): @@ -872,6 +815,7 @@ class CloudController(object): raise exception.ApiError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: raise exception.ApiError(_('operation_type must be add or remove')) + LOG.audit(_("Updating image %s publicity"), image_id, context=context) return 
self.image_service.modify(context, image_id, operation_type) def update_image(self, context, image_id, **kwargs): diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index f832863a9e51..848f0b034359 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -18,15 +18,15 @@ """Metadata request handler.""" -import logging - import webob.dec import webob.exc +from nova import log as logging from nova import flags from nova.api.ec2 import cloud +LOG = logging.getLogger('nova.api.ec2.metadata') FLAGS = flags.FLAGS @@ -72,10 +72,13 @@ class MetadataRequestHandler(object): remote_address = req.headers.get('X-Forwarded-For', remote_address) meta_data = cc.get_metadata(remote_address) if meta_data is None: - logging.error(_('Failed to get metadata for ip: %s') % - remote_address) + LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() data = self.lookup(req.path_info, meta_data) if data is None: raise webob.exc.HTTPNotFound() return self.print_data(data) + + +def metadata_factory(global_args, **local_args): + return MetadataRequestHandler() diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index ea6dff004194..ad203c51f1a1 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -20,30 +20,24 @@ WSGI middleware for OpenStack API controllers. """ -import json -import time - -import logging import routes -import traceback import webob.dec import webob.exc import webob -from nova import context from nova import flags +from nova import log as logging from nova import utils from nova import wsgi from nova.api.openstack import faults from nova.api.openstack import backup_schedules from nova.api.openstack import flavors from nova.api.openstack import images -from nova.api.openstack import ratelimiting from nova.api.openstack import servers from nova.api.openstack import sharedipgroups -from nova.auth import manager +LOG = logging.getLogger('nova.api.openstack') FLAGS = flags.FLAGS flags.DEFINE_string('os_api_auth', 'nova.api.openstack.auth.AuthMiddleware', @@ -73,8 +67,7 @@ class API(wsgi.Middleware): try: return req.get_response(self.application) except Exception as ex: - logging.warn(_("Caught error: %s") % str(ex)) - logging.error(traceback.format_exc()) + LOG.exception(_("Caught error: %s"), str(ex)) exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) return faults.Fault(exc) @@ -90,7 +83,7 @@ class APIRouter(wsgi.Router): server_members = {'action': 'POST'} if FLAGS.allow_admin_api: - logging.debug("Including admin operations in API.") + LOG.debug(_("Including admin operations in API.")) server_members['pause'] = 'POST' server_members['unpause'] = 'POST' server_members["diagnostics"] = "GET" @@ -115,3 +108,24 @@ class APIRouter(wsgi.Router): controller=sharedipgroups.Controller()) super(APIRouter, self).__init__(mapper) + + +class Versions(wsgi.Application): + @webob.dec.wsgify + def __call__(self, req): + """Respond to a request for all OpenStack API versions.""" + response = { + "versions": [ + dict(status="CURRENT", id="v1.0")]} + metadata = { + "application/xml": { + "attributes": dict(version=["status", "id"])}} + return wsgi.Serializer(req.environ, metadata).to_content_type(response) + + +def router_factory(global_conf, **local_conf): + return APIRouter() + + +def versions_factory(global_conf, **local_conf): + return Versions() diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index
1dfdd5318c2b..00e817c8de86 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -134,3 +134,9 @@ class AuthMiddleware(wsgi.Middleware): token = self.db.auth_create_token(ctxt, token_dict) return token, user return None, None + + +def auth_factory(global_conf, **local_conf): + def auth(app): + return AuthMiddleware(app) + return auth diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 867ee5a7e3eb..0b239aab850d 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -17,15 +17,14 @@ from webob import exc +from nova import compute from nova import flags from nova import utils from nova import wsgi import nova.api.openstack -import nova.image.service - from nova.api.openstack import common from nova.api.openstack import faults -from nova.compute import api as compute_api +import nova.image.service FLAGS = flags.FLAGS @@ -131,7 +130,7 @@ class Controller(wsgi.Controller): env = self._deserialize(req.body, req) instance_id = env["image"]["serverId"] name = env["image"]["name"] - return compute_api.ComputeAPI().snapshot(context, instance_id, name) + return compute.API().snapshot(context, instance_id, name) def update(self, req, id): # Users may not modify public images, and that's all that diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index cbb4b897eb9a..81b83142ffc0 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -219,3 +219,9 @@ class WSGIAppProxy(object): # No delay return None return float(resp.getheader('X-Wait-Seconds')) + + +def ratelimit_factory(global_conf, **local_conf): + def rl(app): + return RateLimitingMiddleware(app) + return rl diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 15082cb542a6..4cc44d352eae 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,17 +15,17 @@ # License for the specific language governing permissions and limitations # under the License. 
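The new *_factory helpers in this change (metadata_factory, router_factory, versions_factory, auth_factory, ratelimit_factory) all follow the PasteDeploy contract: PasteDeploy calls the factory with the [DEFAULT] globals plus any section-local options, and a filter factory returns a callable that wraps the next application in the pipeline. A minimal sketch of that contract, using only names this diff adds (the inner WSGI app is a hypothetical stand-in, not part of the change):

    from nova.api.openstack.auth import auth_factory

    def inner_app(environ, start_response):
        # hypothetical stand-in for the application being wrapped
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['ok']

    make_auth = auth_factory({})   # global_conf; no local options needed
    app = make_auth(inner_app)     # AuthMiddleware wrapping inner_app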
-import logging import traceback from webob import exc +from nova import compute from nova import exception +from nova import log as logging from nova import wsgi from nova.api.openstack import common from nova.api.openstack import faults from nova.auth import manager as auth_manager -from nova.compute import api as compute_api from nova.compute import instance_types from nova.compute import power_state import nova.api.openstack + +LOG = logging.getLogger('nova.api.openstack.servers') @@ -51,7 +51,7 @@ def _translate_detail_keys(inst): inst_dict = {} mapped_keys = dict(status='state', imageId='image_id', - flavorId='instance_type', name='display_name', id='internal_id') + flavorId='instance_type', name='display_name', id='id') for k, v in mapped_keys.iteritems(): inst_dict[k] = inst[v] @@ -67,7 +67,7 @@ def _translate_keys(inst): """ Coerces into dictionary format, excluding all model attributes save for id and name """ - return dict(server=dict(id=inst['internal_id'], name=inst['display_name'])) + return dict(server=dict(id=inst['id'], name=inst['display_name'])) class Controller(wsgi.Controller): @@ -80,7 +80,7 @@ "status", "progress"]}}} def __init__(self): - self.compute_api = compute_api.ComputeAPI() + self.compute_api = compute.API() super(Controller, self).__init__() def index(self, req): @@ -96,8 +96,7 @@ entity_maker - either _translate_detail_keys or _translate_keys """ - instance_list = self.compute_api.get_instances( - req.environ['nova.context']) + instance_list = self.compute_api.get_all(req.environ['nova.context']) limited_list = common.limited(instance_list, req) res = [entity_maker(inst)['server'] for inst in limited_list] return dict(servers=res) @@ -105,8 +104,7 @@ def show(self, req, id): """ Returns server details by server id """ try: - instance = self.compute_api.get_instance( - req.environ['nova.context'], int(id)) + instance = self.compute_api.get(req.environ['nova.context'], id) return _translate_detail_keys(instance) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) @@ -114,8 +112,7 @@ def delete(self, req, id): """ Destroys a server """ try: - self.compute_api.delete_instance(req.environ['nova.context'], - int(id)) + self.compute_api.delete(req.environ['nova.context'], id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() @@ -128,12 +125,12 @@ key_pair = auth_manager.AuthManager.get_key_pairs( req.environ['nova.context'])[0] - instances = self.compute_api.create_instances( + instances = self.compute_api.create( req.environ['nova.context'], instance_types.get_by_flavor_id(env['server']['flavorId']), env['server']['imageId'], display_name=env['server']['name'], - description=env['server']['name'], + display_description=env['server']['name'], key_name=key_pair['name'], key_data=key_pair['public_key']) return _translate_keys(instances[0]) @@ -151,10 +148,8 @@ update_dict['display_name'] = inst_dict['server']['name'] try: - ctxt = req.environ['nova.context'] - self.compute_api.update_instance(ctxt, - id, - **update_dict) + self.compute_api.update(req.environ['nova.context'], id, + **update_dict) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) return exc.HTTPNoContent() @@ -175,6 +170,50 @@ return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def
lock(self, req, id): + """ + lock the instance with id + admin only operation + + """ + context = req.environ['nova.context'] + try: + self.compute_api.lock(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::lock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def unlock(self, req, id): + """ + unlock the instance with id + admin only operation + + """ + context = req.environ['nova.context'] + try: + self.compute_api.unlock(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::unlock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def get_lock(self, req, id): + """ + return the boolean state of (instance with id)'s lock + + """ + context = req.environ['nova.context'] + try: + self.compute_api.get_lock(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::get_lock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + def pause(self, req, id): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] @@ -182,7 +221,7 @@ class Controller(wsgi.Controller): self.compute_api.pause(ctxt, id) except: readable = traceback.format_exc() - logging.error(_("Compute.api::pause %s"), readable) + LOG.exception(_("Compute.api::pause %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -193,7 +232,7 @@ class Controller(wsgi.Controller): self.compute_api.unpause(ctxt, id) except: readable = traceback.format_exc() - logging.error(_("Compute.api::unpause %s"), readable) + LOG.exception(_("Compute.api::unpause %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -204,7 +243,7 @@ class Controller(wsgi.Controller): self.compute_api.suspend(context, id) except: readable = traceback.format_exc() - logging.error(_("compute.api::suspend %s"), readable) + LOG.exception(_("compute.api::suspend %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -215,7 +254,7 @@ class Controller(wsgi.Controller): self.compute_api.resume(context, id) except: readable = traceback.format_exc() - logging.error(_("compute.api::resume %s"), readable) + LOG.exception(_("compute.api::resume %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -236,4 +275,13 @@ class Controller(wsgi.Controller): def actions(self, req, id): """Permit Admins to retrieve server actions.""" ctxt = req.environ["nova.context"] - return self.compute_api.get_actions(ctxt, id) + items = self.compute_api.get_actions(ctxt, id) + actions = [] + # TODO(jk0): Do not do pre-serialization here once the default + # serializer is updated + for item in items: + actions.append(dict( + created_at=str(item.created_at), + action=item.action, + error=item.error)) + return dict(actions=actions) diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py index 47e435cb6801..0eb6fe58862a 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -20,7 +20,6 @@ Auth driver using the DB as its backend. """ -import logging import sys from nova import context diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 7616ff112b26..c8de20028e53 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -24,11 +24,11 @@ other backends by creating another class that exposes the same public methods. 
""" -import logging import sys from nova import exception from nova import flags +from nova import log as logging FLAGS = flags.FLAGS @@ -65,6 +65,8 @@ flags.DEFINE_string('ldap_netadmin', flags.DEFINE_string('ldap_developer', 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers') +LOG = logging.getLogger("nova.ldapdriver") + # TODO(vish): make an abstract base class with the same public methods # to define a set interface for AuthDrivers. I'm delaying @@ -502,8 +504,8 @@ class LdapDriver(object): try: self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: - logging.debug(_("Attempted to remove the last member of a group. " - "Deleting the group at %s instead."), group_dn) + LOG.debug(_("Attempted to remove the last member of a group. " + "Deleting the group at %s instead."), group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index d3e266952d6e..5685ae5e2919 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -20,7 +20,6 @@ Nova authentication management """ -import logging import os import shutil import string # pylint: disable-msg=W0402 @@ -33,6 +32,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import signer @@ -70,6 +70,8 @@ flags.DEFINE_string('credential_rc_file', '%src', flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', 'Driver that auth manager uses') +LOG = logging.getLogger('nova.auth.manager') + class AuthBase(object): """Base class for objects relating to auth @@ -254,43 +256,51 @@ class AuthManager(object): # TODO(vish): check for valid timestamp (access_key, _sep, project_id) = access.partition(':') - logging.info(_('Looking up user: %r'), access_key) + LOG.debug(_('Looking up user: %r'), access_key) user = self.get_user_from_access_key(access_key) - logging.info('user: %r', user) + LOG.debug('user: %r', user) if user == None: + LOG.audit(_("Failed authorization for access key %s"), access_key) raise exception.NotFound(_('No user found for access key %s') % access_key) # NOTE(vish): if we stop using project name as id we need better # logic to find a default project for user if project_id == '': + LOG.debug(_("Using project name = user name (%s)"), user.name) project_id = user.name project = self.get_project(project_id) if project == None: + LOG.audit(_("failed authorization: no project named %s (user=%s)"), + project_id, user.name) raise exception.NotFound(_('No project called %s could be found') % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): + LOG.audit(_("Failed authorization: user %s not admin and not " + "member of project %s"), user.name, project.name) raise exception.NotFound(_('User %s is not a member of project %s') % (user.id, project.id)) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) + LOG.debug('user.secret: %s', user.secret) + LOG.debug('expected_signature: %s', expected_signature) + LOG.debug('signature: %s', signature) if signature != expected_signature: + LOG.audit(_("Invalid signature for user %s"), user.name) raise exception.NotAuthorized(_('Signature does not match')) elif check_type == 'ec2': # NOTE(vish): hmac can't handle 
unicode, so encode ensures that # secret isn't unicode expected_signature = signer.Signer(user.secret.encode()).generate( params, verb, server_string, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) + LOG.debug('user.secret: %s', user.secret) + LOG.debug('expected_signature: %s', expected_signature) + LOG.debug('signature: %s', signature) if signature != expected_signature: + LOG.audit(_("Invalid signature for user %s"), user.name) raise exception.NotAuthorized(_('Signature does not match')) return (user, project) @@ -398,6 +408,12 @@ class AuthManager(object): raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: raise exception.NotFound(_("The %s role is global only") % role) + if project: + LOG.audit(_("Adding role %s to user %s in project %s"), role, + User.safe_id(user), Project.safe_id(project)) + else: + LOG.audit(_("Adding sitewide role %s to user %s"), role, + User.safe_id(user)) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) @@ -418,6 +434,12 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to remove local role. """ + if project: + LOG.audit(_("Removing role %s from user %s on project %s"), + role, User.safe_id(user), Project.safe_id(project)) + else: + LOG.audit(_("Removing sitewide role %s from user %s"), role, + User.safe_id(user)) with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) @@ -480,6 +502,8 @@ class AuthManager(object): description, member_users) if project_dict: + LOG.audit(_("Created project %s with manager %s"), name, + manager_user) project = Project(**project_dict) return project @@ -496,6 +520,7 @@ class AuthManager(object): @param project: This will be the new description of the project. 
""" + LOG.audit(_("modifying project %s"), Project.safe_id(project)) if manager_user: manager_user = User.safe_id(manager_user) with self.driver() as drv: @@ -505,6 +530,8 @@ class AuthManager(object): def add_to_project(self, user, project): """Add user to project""" + LOG.audit(_("Adding user %s to project %s"), User.safe_id(user), + Project.safe_id(project)) with self.driver() as drv: return drv.add_to_project(User.safe_id(user), Project.safe_id(project)) @@ -523,6 +550,8 @@ class AuthManager(object): def remove_from_project(self, user, project): """Removes a user from a project""" + LOG.audit(_("Remove user %s from project %s"), User.safe_id(user), + Project.safe_id(project)) with self.driver() as drv: return drv.remove_from_project(User.safe_id(user), Project.safe_id(project)) @@ -549,6 +578,7 @@ class AuthManager(object): def delete_project(self, project): """Deletes a project""" + LOG.audit(_("Deleting project %s"), Project.safe_id(project)) with self.driver() as drv: drv.delete_project(Project.safe_id(project)) @@ -603,13 +633,16 @@ class AuthManager(object): with self.driver() as drv: user_dict = drv.create_user(name, access, secret, admin) if user_dict: - return User(**user_dict) + rv = User(**user_dict) + LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin) + return rv def delete_user(self, user): """Deletes a user Additionally deletes all users key_pairs""" uid = User.safe_id(user) + LOG.audit(_("Deleting user %s"), uid) db.key_pair_destroy_all_by_user(context.get_admin_context(), uid) with self.driver() as drv: @@ -618,6 +651,12 @@ class AuthManager(object): def modify_user(self, user, access_key=None, secret_key=None, admin=None): """Modify credentials for a user""" uid = User.safe_id(user) + if access_key: + LOG.audit(_("Access Key change for user %s"), uid) + if secret_key: + LOG.audit(_("Secret Key change for user %s"), uid) + if admin is not None: + LOG.audit(_("Admin status set to %r for user %s"), admin, uid) with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) @@ -666,7 +705,7 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - logging.warn(_("No vpn data for project %s"), pid) + LOG.warn(_("No vpn data for project %s"), pid) zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid)) zippy.close() diff --git a/nova/auth/signer.py b/nova/auth/signer.py index f7d29f53431e..744e315d4877 100644 --- a/nova/auth/signer.py +++ b/nova/auth/signer.py @@ -46,7 +46,6 @@ Utility class for parsing signed AMI manifests. 
import base64 import hashlib import hmac -import logging import urllib # NOTE(vish): for new boto @@ -54,9 +53,13 @@ import boto # NOTE(vish): for old boto import boto.utils +from nova import log as logging from nova.exception import Error +LOG = logging.getLogger('nova.signer') + + class Signer(object): """Hacked up code from boto/connection.py""" @@ -120,7 +123,7 @@ class Signer(object): def _calc_signature_2(self, params, verb, server_string, path): """Generate AWS signature version 2 string.""" - logging.debug('using _calc_signature_2') + LOG.debug('using _calc_signature_2') string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path) if self.hmac_256: current_hmac = self.hmac_256 @@ -136,13 +139,13 @@ class Signer(object): val = urllib.quote(val, safe='-_~') pairs.append(urllib.quote(key, safe='') + '=' + val) qs = '&'.join(pairs) - logging.debug('query string: %s', qs) + LOG.debug('query string: %s', qs) string_to_sign += qs - logging.debug('string_to_sign: %s', string_to_sign) + LOG.debug('string_to_sign: %s', string_to_sign) current_hmac.update(string_to_sign) b64 = base64.b64encode(current_hmac.digest()) - logging.debug('len(b64)=%d', len(b64)) - logging.debug('base64 encoded digest: %s', b64) + LOG.debug('len(b64)=%d', len(b64)) + LOG.debug('base64 encoded digest: %s', b64) return b64 diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 09361828d5d6..8aefd341f87a 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -22,7 +22,6 @@ an instance with it. """ -import logging import os import string import tempfile @@ -33,6 +32,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager # TODO(eday): Eventually changes these to something not ec2-specific @@ -51,7 +51,7 @@ flags.DEFINE_string('dmz_mask', _('Netmask to push into openvpn config')) -LOG = logging.getLogger('nova-cloudpipe') +LOG = logging.getLogger('nova.cloudpipe') class CloudPipe(object): diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index a5df2ec1acc2..b94f971d1d93 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -16,17 +16,4 @@ # License for the specific language governing permissions and limitations # under the License. -""" -:mod:`nova.compute` -- Compute Nodes using LibVirt -===================================================== - -.. automodule:: nova.compute - :platform: Unix - :synopsis: Thin wrapper around libvirt for VM mgmt. -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith -""" +from nova.compute.api import API diff --git a/nova/compute/api.py b/nova/compute/api.py index 0658044d22e8..adf4dbe43ae6 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -17,75 +17,82 @@ # under the License. """ -Handles all API requests relating to instances (guest vms). +Handles all requests relating to instances (guest vms). 
""" import datetime -import logging import time from nova import db from nova import exception from nova import flags +from nova import log as logging +from nova import network from nova import quota from nova import rpc from nova import utils +from nova import volume from nova.compute import instance_types from nova.db import base FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.compute.api') -def generate_default_hostname(internal_id): +def generate_default_hostname(instance_id): """Default function to generate a hostname given an instance reference.""" - return str(internal_id) + return str(instance_id) -class ComputeAPI(base.Base): +class API(base.Base): """API for interacting with the compute manager.""" - def __init__(self, network_manager=None, image_service=None, **kwargs): - if not network_manager: - network_manager = utils.import_object(FLAGS.network_manager) - self.network_manager = network_manager + def __init__(self, image_service=None, network_api=None, volume_api=None, + **kwargs): if not image_service: image_service = utils.import_object(FLAGS.image_service) self.image_service = image_service - super(ComputeAPI, self).__init__(**kwargs) + if not network_api: + network_api = network.API() + self.network_api = network_api + if not volume_api: + volume_api = volume.API() + self.volume_api = volume_api + super(API, self).__init__(**kwargs) def get_network_topic(self, context, instance_id): try: - instance = self.db.instance_get_by_internal_id(context, - instance_id) + instance = self.get(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) + LOG.warning(_("Instance %d was not found in get_network_topic"), + instance_id) raise e host = instance['host'] if not host: - raise exception.Error("Instance %d has no host" % instance_id) + raise exception.Error(_("Instance %d has no host") % instance_id) topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) return rpc.call(context, topic, {"method": "get_network_topic", "args": {'fake': 1}}) - def create_instances(self, context, instance_type, image_id, min_count=1, - max_count=1, kernel_id=None, ramdisk_id=None, - display_name='', description='', key_name=None, - key_data=None, security_group='default', - user_data=None, - generate_hostname=generate_default_hostname): - """Create the number of instances requested if quote and + def create(self, context, instance_type, + image_id, kernel_id=None, ramdisk_id=None, + min_count=1, max_count=1, + display_name='', display_description='', + key_name=None, key_data=None, security_group='default', + availability_zone=None, user_data=None, + generate_hostname=generate_default_hostname): + """Create the number of instances requested if quota and other arguments check out ok.""" - num_instances = quota.allowed_instances(context, max_count, - instance_type) + type_data = instance_types.INSTANCE_TYPES[instance_type] + num_instances = quota.allowed_instances(context, max_count, type_data) if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." % + LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"), + context.project_id, min_count) + raise quota.QuotaError(_("Instance quota exceeded. 
You can only " + "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") is_vpn = image_id == FLAGS.vpn_image_id @@ -95,11 +102,11 @@ class ComputeAPI(base.Base): kernel_id = image.get('kernelId', None) if ramdisk_id is None: ramdisk_id = image.get('ramdiskId', None) - #No kernel and ramdisk for raw images + # No kernel and ramdisk for raw images if kernel_id == str(FLAGS.null_kernel): kernel_id = None ramdisk_id = None - logging.debug("Creating a raw instance") + LOG.debug(_("Creating a raw instance")) # Make sure we have access to kernel and ramdisk (if not raw) if kernel_id: self.image_service.show(context, kernel_id) @@ -123,7 +130,6 @@ class ComputeAPI(base.Base): key_pair = db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] - type_data = instance_types.INSTANCE_TYPES[instance_type] base_options = { 'reservation_id': utils.generate_uid('r'), 'image_id': image_id, @@ -138,21 +144,22 @@ class ComputeAPI(base.Base): 'vcpus': type_data['vcpus'], 'local_gb': type_data['local_gb'], 'display_name': display_name, - 'display_description': description, + 'display_description': display_description, 'user_data': user_data or '', 'key_name': key_name, - 'key_data': key_data} + 'key_data': key_data, + 'locked': False, + 'availability_zone': availability_zone} elevated = context.elevated() instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) + LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): instance = dict(mac_address=utils.generate_mac(), launch_index=num, **base_options) instance = self.db.instance_create(context, instance) instance_id = instance['id'] - internal_id = instance['internal_id'] elevated = context.elevated() if not security_groups: @@ -163,14 +170,14 @@ class ComputeAPI(base.Base): security_group_id) # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(internal_id)) + updates = dict(hostname=generate_hostname(instance_id)) if 'display_name' not in instance: - updates['display_name'] = "Server %s" % internal_id + updates['display_name'] = "Server %s" % instance_id - instance = self.update_instance(context, instance_id, **updates) + instance = self.update(context, instance_id, **updates) instances.append(instance) - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), + LOG.debug(_("Casting to scheduler for %s/%s's instance %s"), context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, @@ -178,6 +185,9 @@ class ComputeAPI(base.Base): "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id}}) + for group_id in security_groups: + self.trigger_security_group_members_refresh(elevated, group_id) + return instances def ensure_default_security_group(self, context): @@ -197,7 +207,61 @@ class ComputeAPI(base.Base): 'project_id': context.project_id} db.security_group_create(context, values) - def update_instance(self, context, instance_id, **kwargs): + def trigger_security_group_rules_refresh(self, context, security_group_id): + """Called when a rule is added to or removed from a security_group""" + + security_group = self.db.security_group_get(context, security_group_id) + + hosts = set() + for instance in security_group['instances']: + if instance['host'] is not None: + hosts.add(instance['host']) + + for host in hosts: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group_rules", + "args": {"security_group_id": 
+ + def trigger_security_group_members_refresh(self, context, group_id): + """Called when a security group gains a new or loses a member + + Sends an update request to each compute node for whom this is + relevant.""" + + # First, we get the security group rules that reference this group as + # the grantee.. + security_group_rules = \ + self.db.security_group_rule_get_by_security_group_grantee( + context, + group_id) + + # ..then we distill the security groups to which they belong.. + security_groups = set() + for rule in security_group_rules: + security_groups.add(rule['parent_group_id']) + + # ..then we find the instances that are members of these groups.. + instances = set() + for parent_group_id in security_groups: + security_group = self.db.security_group_get(context, + parent_group_id) + for instance in security_group['instances']: + instances.add(instance) + + # ...then we find the hosts where they live... + hosts = set() + for instance in instances: + if instance['host']: + hosts.add(instance['host']) + + # ...and finally we tell these nodes to refresh their view of this + # particular security group. + for host in hosts: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group_members", + "args": {"security_group_id": group_id}}) + + def update(self, context, instance_id, **kwargs): """Updates the instance in the datastore. :param context: The security context @@ -211,132 +275,138 @@ """ return self.db.instance_update(context, instance_id, kwargs) - def delete_instance(self, context, instance_id): - logging.debug("Going to try and terminate %d" % instance_id) + def delete(self, context, instance_id): + LOG.debug(_("Going to try and terminate %s"), instance_id) try: - instance = self.db.instance_get_by_internal_id(context, - instance_id) + instance = self.get(context, instance_id) except exception.NotFound as e: - logging.warning(_("Instance %d was not found during terminate"), - instance_id) + LOG.warning(_("Instance %d was not found during terminate"), + instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %d is already being terminated"), - instance_id) + LOG.warning(_("Instance %d is already being terminated"), + instance_id) return - self.update_instance(context, - instance['id'], - state_description='terminating', - state=0, - terminated_at=datetime.datetime.utcnow()) + self.update(context, + instance['id'], + state_description='terminating', + state=0, + terminated_at=datetime.datetime.utcnow()) host = instance['host'] if host: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "terminate_instance", - "args": {"instance_id": instance['id']}}) + "args": {"instance_id": instance_id}}) else: - self.db.instance_destroy(context, instance['id']) + self.db.instance_destroy(context, instance_id) - def get_instances(self, context, project_id=None): - """Get all instances, possibly filtered by project ID or - user ID. If there is no filter and the context is an admin, - it will retreive all instances in the system.""" + def get(self, context, instance_id): + """Get a single instance with the given ID.""" + return self.db.instance_get_by_id(context, instance_id)
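get() replaces both the raw instance_get_by_internal_id lookups and the old get_instance helper; as in delete() above, callers are expected to catch exception.NotFound. A caller-side sketch (the find_instance wrapper is illustrative, not part of the change):

    from nova import compute, exception

    def find_instance(ctxt, instance_id):
        """Return the instance, or None if the id is unknown."""
        api = compute.API()
        try:
            return api.get(ctxt, instance_id)   # raises NotFound otherwise
        except exception.NotFound:
            return None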
+ + def get_all(self, context, project_id=None, reservation_id=None, + fixed_ip=None): + """Get all instances, possibly filtered by one of the + given parameters. If there is no filter and the context is + an admin, it will retrieve all instances in the system.""" + if reservation_id is not None: + return self.db.instance_get_all_by_reservation(context, + reservation_id) + if fixed_ip is not None: + return self.db.fixed_ip_get_instance(context, fixed_ip) if project_id or not context.is_admin: if not context.project: return self.db.instance_get_all_by_user(context, context.user_id) if project_id is None: project_id = context.project_id - return self.db.instance_get_all_by_project(context, project_id) + return self.db.instance_get_all_by_project(context, + project_id) return self.db.instance_get_all(context) - def get_instance(self, context, instance_id): - return self.db.instance_get_by_internal_id(context, instance_id) - def snapshot(self, context, instance_id, name): """Snapshot the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.get(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "snapshot_instance", - "args": {"instance_id": instance['id'], "name": name}}) + "args": {"instance_id": instance_id, "name": name}}) def reboot(self, context, instance_id): """Reboot the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.get(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "reboot_instance", - "args": {"instance_id": instance['id']}}) + "args": {"instance_id": instance_id}}) def pause(self, context, instance_id): """Pause the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.get(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "pause_instance", - "args": {"instance_id": instance['id']}}) + "args": {"instance_id": instance_id}}) def unpause(self, context, instance_id): """Unpause the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.get(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "unpause_instance", - "args": {"instance_id": instance['id']}}) + "args": {"instance_id": instance_id}}) def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.get(context, instance_id) host = instance["host"] return rpc.call(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "get_diagnostics", - "args": {"instance_id": instance["id"]}}) + "args": {"instance_id": instance_id}}) def get_actions(self, context, instance_id): """Retrieve actions for the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) - return self.db.instance_get_actions(context, instance["id"]) + return self.db.instance_get_actions(context, instance_id) def suspend(self, context, instance_id): """suspend the instance with instance_id""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.get(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "suspend_instance", - "args": {"instance_id": instance['id']}}) + "args": {"instance_id": instance_id}})
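snapshot() down through suspend() above (and resume/rescue/unrescue below) all repeat one shape: resolve the instance's host, then cast to that host's compute queue. A possible consolidation, sketched with the names this diff already uses; the helper itself is not part of the change:

    def _cast_compute_message(self, method, context, instance_id, **extra):
        """Cast a method to the compute host of the given instance."""
        instance = self.get(context, instance_id)
        host = instance['host']
        args = {'instance_id': instance_id}
        args.update(extra)
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {'method': method, 'args': args})

    # snapshot() would then reduce to:
    #     self._cast_compute_message('snapshot_instance', context,
    #                                instance_id, name=name)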
def resume(self, context, instance_id): """resume the instance with instance_id""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.get(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "resume_instance", - "args": {"instance_id": instance['id']}}) + "args": {"instance_id": instance_id}}) def rescue(self, context, instance_id): """Rescue the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.get(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "rescue_instance", - "args": {"instance_id": instance['id']}}) def unrescue(self, context, instance_id): """Unrescue the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.get(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -344,11 +414,7 @@ class ComputeAPI(base.Base): "args": {"instance_id": instance['id']}}) def get_ajax_console(self, context, instance_id): - """Get an AJAX Console - - In order for this to work properly, a ttyS0 must be configured - in the instance - """ + """Get a url to an AJAX Console""" - instance_ref = db.instance_get_by_internal_id(context, instance_id) + instance_ref = self.get(context, instance_id) @@ -366,3 +432,66 @@ return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url, output['token'])} + def lock(self, context, instance_id): + """ + lock the instance with instance_id + + """ + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "lock_instance", + "args": {"instance_id": instance['id']}}) + + def unlock(self, context, instance_id): + """ + unlock the instance with instance_id + + """ + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unlock_instance", + "args": {"instance_id": instance['id']}}) + + def get_lock(self, context, instance_id): + """ + return the boolean state of (instance with instance_id)'s lock + + """ + instance = self.get(context, instance_id) + return instance['locked'] + + def attach_volume(self, context, instance_id, volume_id, device): + if not re.match("^/dev/[a-z]d[a-z]+$", device): + raise exception.ApiError(_("Invalid device specified: %s.
" + "Example device: /dev/vdb") % device) + self.volume_api.check_attach(context, volume_id) + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "attach_volume", + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + + def detach_volume(self, context, volume_id): + instance = self.db.volume_get_instance(context.elevated(), volume_id) + if not instance: + raise exception.ApiError(_("Volume isn't attached to anything!")) + self.volume_api.check_detach(context, volume_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "detach_volume", + "args": {"instance_id": instance['id'], + "volume_id": volume_id}}) + return instance + + def associate_floating_ip(self, context, instance_id, address): + instance = self.get(context, instance_id) + self.network_api.associate_floating_ip(context, address, + instance['fixed_ip']) diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 814a258cd61e..741499294082 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -22,14 +22,15 @@ Includes injection of SSH PGP keys into authorized_keys file. """ -import logging import os import tempfile from nova import exception from nova import flags +from nova import log as logging +LOG = logging.getLogger('nova.compute.disk') FLAGS = flags.FLAGS flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, 'minimum size in bytes of root partition') @@ -67,12 +68,12 @@ def partition(infile, outfile, local_bytes=0, resize=True, execute('resize2fs %s' % infile) file_size = FLAGS.minimum_root_size elif file_size % sector_size != 0: - logging.warn(_("Input partition size not evenly divisible by" - " sector size: %d / %d"), file_size, sector_size) + LOG.warn(_("Input partition size not evenly divisible by" + " sector size: %d / %d"), file_size, sector_size) primary_sectors = file_size / sector_size if local_bytes % sector_size != 0: - logging.warn(_("Bytes for local storage not evenly divisible" - " by sector size: %d / %d"), local_bytes, sector_size) + LOG.warn(_("Bytes for local storage not evenly divisible" + " by sector size: %d / %d"), local_bytes, sector_size) local_sectors = local_bytes / sector_size mbr_last = 62 # a diff --git a/nova/compute/manager.py b/nova/compute/manager.py index e485a0415938..fd1e983fad6f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,10 +35,11 @@ terminating it. 
""" import datetime -import logging +import functools from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils @@ -52,6 +53,42 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', flags.DEFINE_string('stub_network', False, 'Stub network related code') +LOG = logging.getLogger('nova.compute.manager') + + +def checks_instance_lock(function): + """ + decorator used for preventing action against locked instances + unless, of course, you happen to be admin + + """ + + @functools.wraps(function) + def decorated_function(self, context, instance_id, *args, **kwargs): + + LOG.info(_("check_instance_lock: decorating: |%s|"), function, + context=context) + LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"), + self, context, instance_id, context=context) + locked = self.get_lock(context, instance_id) + admin = context.is_admin + LOG.info(_("check_instance_lock: locked: |%s|"), locked, + context=context) + LOG.info(_("check_instance_lock: admin: |%s|"), admin, + context=context) + + # if admin or unlocked call function otherwise log error + if admin or not locked: + LOG.info(_("check_instance_lock: executing: |%s|"), function, + context=context) + function(self, context, instance_id, *args, **kwargs) + else: + LOG.error(_("check_instance_lock: not executing |%s|"), + function, context=context) + return False + + return decorated_function + class ComputeManager(manager.Manager): @@ -100,9 +137,16 @@ class ComputeManager(manager.Manager): host) @exception.wrap_exception - def refresh_security_group(self, context, security_group_id, **_kwargs): - """This call passes stright through to the virtualization driver.""" - self.driver.refresh_security_group(security_group_id) + def refresh_security_group_rules(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + return self.driver.refresh_security_group_rules(security_group_id) + + @exception.wrap_exception + def refresh_security_group_members(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + return self.driver.refresh_security_group_members(security_group_id) @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): @@ -111,7 +155,8 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) - logging.debug(_("instance %s: starting..."), instance_id) + LOG.audit(_("instance %s: starting..."), instance_id, + context=context) self.db.instance_update(context, instance_id, {'host': self.host}) @@ -149,8 +194,8 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - logging.exception(_("instance %s: Failed to spawn"), - instance_ref['name']) + LOG.exception(_("instance %s: Failed to spawn"), instance_id, + context=context) self.db.instance_set_state(context, instance_id, power_state.SHUTDOWN) @@ -158,17 +203,19 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Terminating 
instance %s"), instance_id, context=context) if not FLAGS.stub_network: address = self.db.instance_get_floating_address(context, instance_ref['id']) if address: - logging.debug(_("Disassociating address %s") % address) + LOG.debug(_("Disassociating address %s"), address, + context=context) # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. @@ -180,15 +227,14 @@ class ComputeManager(manager.Manager): address = self.db.instance_get_fixed_address(context, instance_ref['id']) if address: - logging.debug(_("Deallocating address %s") % address) + LOG.debug(_("Deallocating address %s"), address, + context=context) # NOTE(vish): Currently, nothing needs to be done on the # network node until release. If this changes, # we will need to cast here. self.network_manager.deallocate_fixed_ip(context.elevated(), address) - logging.debug(_("instance %s: terminating"), instance_id) - volumes = instance_ref.get('volumes', []) or [] for volume in volumes: self.detach_volume(context, instance_id, volume['id']) @@ -202,20 +248,22 @@ class ComputeManager(manager.Manager): self.db.instance_destroy(context, instance_id) @exception.wrap_exception + @checks_instance_lock def reboot_instance(self, context, instance_id): """Reboot an instance on this server.""" context = context.elevated() self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Rebooting instance %s"), instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to reboot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_ref['internal_id'], - instance_ref['state'], - power_state.RUNNING) + LOG.warn(_('trying to reboot a non-running ' + 'instance: %s (state: %s excepted: %s)'), + instance_id, + instance_ref['state'], + power_state.RUNNING, + context=context) - logging.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -235,24 +283,22 @@ class ComputeManager(manager.Manager): # potentially? 
@@ -235,24 +283,22 @@ # potentially? self._update_state(context, instance_id) - logging.debug(_('instance %s: snapshotting'), instance_ref['name']) + LOG.audit(_('instance %s: snapshotting'), instance_id, + context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to snapshot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_ref['internal_id'], - instance_ref['state'], - power_state.RUNNING) + LOG.warn(_('trying to snapshot a non-running ' + 'instance: %s (state: %s expected: %s)'), + instance_id, instance_ref['state'], power_state.RUNNING) self.driver.snapshot(instance_ref, name) @exception.wrap_exception + @checks_instance_lock def rescue_instance(self, context, instance_id): """Rescue an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: rescuing'), - instance_ref['internal_id']) + LOG.audit(_('instance %s: rescuing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -262,13 +308,12 @@ self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def unrescue_instance(self, context, instance_id): """Unrescue an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: unrescuing'), - instance_ref['internal_id']) + LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -282,13 +327,12 @@ self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug('instance %s: pausing', - instance_ref['internal_id']) + LOG.audit(_('instance %s: pausing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'pausing') self.driver.pause(instance_ref, lambda result: self._update_state_callback(self, context, instance_id, result)) @exception.wrap_exception + @checks_instance_lock def unpause_instance(self, context, instance_id): """Unpause a paused instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug('instance %s: unpausing', - instance_ref['internal_id']) + LOG.audit(_('instance %s: unpausing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -323,17 +366,20 @@ instance_ref = self.db.instance_get(context, instance_id) if instance_ref["state"] == power_state.RUNNING: - logging.debug(_("instance %s: retrieving diagnostics"), - instance_ref["internal_id"]) + LOG.audit(_("instance %s: retrieving diagnostics"), instance_id, + context=context) return self.driver.get_diagnostics(instance_ref) + @exception.wrap_exception + @checks_instance_lock def suspend_instance(self, context, instance_id): - """suspend the instance with instance_id""" + """ + suspend the instance with instance_id + + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: suspending'), - instance_ref['internal_id']) + LOG.audit(_('instance %s: suspending'), instance_id, context=context) self.db.instance_set_state(context, instance_id,
power_state.NOSTATE, 'suspending') @@ -344,12 +390,15 @@ result)) @exception.wrap_exception + @checks_instance_lock def resume_instance(self, context, instance_id): - """resume the suspended instance with instance_id""" + """ + resume the suspended instance with instance_id + + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: resuming'), instance_ref['internal_id']) + LOG.audit(_('instance %s: resuming'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'resuming') @@ -359,31 +408,67 @@ instance_id, result)) + @exception.wrap_exception + def lock_instance(self, context, instance_id): + """ + lock the instance with instance_id + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + LOG.debug(_('instance %s: locking'), instance_id, context=context) + self.db.instance_update(context, instance_id, {'locked': True}) + + @exception.wrap_exception + def unlock_instance(self, context, instance_id): + """ + unlock the instance with instance_id + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + LOG.debug(_('instance %s: unlocking'), instance_id, context=context) + self.db.instance_update(context, instance_id, {'locked': False}) + + @exception.wrap_exception + def get_lock(self, context, instance_id): + """ + return the boolean state of (instance with instance_id)'s lock + + """ + context = context.elevated() + LOG.debug(_('instance %s: getting locked state'), instance_id, + context=context) + instance_ref = self.db.instance_get(context, instance_id) + return instance_ref['locked'] + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" context = context.elevated() - logging.debug(_("instance %s: getting console output"), instance_id) instance_ref = self.db.instance_get(context, instance_id) - + LOG.audit(_("Get console output for instance %s"), instance_id, + context=context) return self.driver.get_console_output(instance_ref) @exception.wrap_exception def get_ajax_console(self, context, instance_id): - """Send the console output for an instance.""" + """Return connection information for an ajax console""" context = context.elevated() - logging.debug(_("instance %s: getting ajax console"), instance_id) + LOG.debug(_("instance %s: getting ajax console"), instance_id) instance_ref = self.db.instance_get(context, instance_id) return self.driver.get_ajax_console(instance_ref) @exception.wrap_exception + @checks_instance_lock def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() - logging.debug(_("instance %s: attaching volume %s to %s"), instance_id, - volume_id, mountpoint) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id, + volume_id, mountpoint, context=context) dev_path = self.volume_manager.setup_compute_volume(context, volume_id) try: @@ -398,8 +483,8 @@ # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # exception below.
- logging.exception(_("instance %s: attach failed %s, removing"), - instance_id, mountpoint) + LOG.exception(_("instance %s: attach failed %s, removing"), + instance_id, mountpoint, context=context) self.volume_manager.remove_compute_volume(context, volume_id) raise exc @@ -407,17 +492,18 @@ class ComputeManager(manager.Manager): return True @exception.wrap_exception + @checks_instance_lock def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" context = context.elevated() - logging.debug(_("instance %s: detaching volume %s"), - instance_id, - volume_id) instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) + LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"), + volume_id, volume_ref['mountpoint'], instance_id, + context=context) if instance_ref['name'] not in self.driver.list_instances(): - logging.warn(_("Detaching volume from unknown instance %s"), - instance_ref['name']) + LOG.warn(_("Detaching volume from unknown instance %s"), + instance_id, context=context) else: self.driver.detach_volume(instance_ref['name'], volume_ref['mountpoint']) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 60c347a5ea6e..14d0e8ca1714 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -25,19 +25,17 @@ Instance Monitoring: """ import datetime -import logging import os -import sys import time import boto import boto.s3 import rrdtool -from twisted.internet import defer from twisted.internet import task from twisted.application import service from nova import flags +from nova import log as logging from nova.virt import connection as virt_connection @@ -91,6 +89,9 @@ RRD_VALUES = { utcnow = datetime.datetime.utcnow +LOG = logging.getLogger('nova.compute.monitor') + + def update_rrd(instance, name, data): """ Updates the specified RRD file. @@ -255,20 +256,20 @@ class Instance(object): Updates the instances statistics and stores the resulting graphs in the internal object store on the cloud controller. """ - logging.debug(_('updating %s...'), self.instance_id) + LOG.debug(_('updating %s...'), self.instance_id) try: data = self.fetch_cpu_stats() if data != None: - logging.debug('CPU: %s', data) + LOG.debug('CPU: %s', data) update_rrd(self, 'cpu', data) data = self.fetch_net_stats() - logging.debug('NET: %s', data) + LOG.debug('NET: %s', data) update_rrd(self, 'net', data) data = self.fetch_disk_stats() - logging.debug('DISK: %s', data) + LOG.debug('DISK: %s', data) update_rrd(self, 'disk', data) # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls @@ -285,7 +286,7 @@ class Instance(object): graph_disk(self, '1w') graph_disk(self, '1m') except Exception: - logging.exception(_('unexpected error during update')) + LOG.exception(_('unexpected error during update')) self.last_updated = utcnow() @@ -309,7 +310,7 @@ class Instance(object): self.cputime = float(info['cpu_time']) self.cputime_last_updated = utcnow() - logging.debug('CPU: %d', self.cputime) + LOG.debug('CPU: %d', self.cputime) # Skip calculation on first pass. Need delta to get a meaningful value. if cputime_last_updated == None: @@ -319,17 +320,17 @@ class Instance(object): d = self.cputime_last_updated - cputime_last_updated t = d.days * 86400 + d.seconds - logging.debug('t = %d', t) + LOG.debug('t = %d', t) # Calculate change over time in number of nanoseconds of CPU time used. 
cputime_delta = self.cputime - cputime_last - logging.debug('cputime_delta = %s', cputime_delta) + LOG.debug('cputime_delta = %s', cputime_delta) # Get the number of virtual cpus in this domain. vcpus = int(info['num_cpu']) - logging.debug('vcpus = %d', vcpus) + LOG.debug('vcpus = %d', vcpus) # Calculate CPU % used and cap at 100. return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100) @@ -351,8 +352,8 @@ class Instance(object): rd += rd_bytes wr += wr_bytes except TypeError: - logging.error(_('Cannot get blockstats for "%s" on "%s"'), - disk, self.instance_id) + LOG.error(_('Cannot get blockstats for "%s" on "%s"'), + disk, self.instance_id) raise return '%d:%d' % (rd, wr) @@ -373,8 +374,8 @@ class Instance(object): rx += stats[0] tx += stats[4] except TypeError: - logging.error(_('Cannot get ifstats for "%s" on "%s"'), - interface, self.instance_id) + LOG.error(_('Cannot get ifstats for "%s" on "%s"'), + interface, self.instance_id) raise return '%d:%d' % (rx, tx) @@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service): try: conn = virt_connection.get_connection(read_only=True) except Exception, exn: - logging.exception(_('unexpected exception getting connection')) + LOG.exception(_('unexpected exception getting connection')) time.sleep(FLAGS.monitoring_instances_delay) return @@ -416,14 +417,14 @@ class InstanceMonitor(object, service.Service): try: self.updateInstances_(conn, domain_ids) except Exception, exn: - logging.exception('updateInstances_') + LOG.exception('updateInstances_') def updateInstances_(self, conn, domain_ids): for domain_id in domain_ids: if not domain_id in self._instances: instance = Instance(conn, domain_id) self._instances[domain_id] = instance - logging.debug(_('Found instance: %s'), domain_id) + LOG.debug(_('Found instance: %s'), domain_id) for key in self._instances.keys(): instance = self._instances[key] diff --git a/nova/crypto.py b/nova/crypto.py index b8405552d12b..a34b940f50d5 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -24,7 +24,6 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates. import base64 import gettext import hashlib -import logging import os import shutil import struct @@ -39,8 +38,10 @@ gettext.install('nova', unicode=1) from nova import context from nova import db from nova import flags +from nova import log as logging +LOG = logging.getLogger("nova.crypto") FLAGS = flags.FLAGS flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA')) flags.DEFINE_string('key_file', @@ -254,7 +255,7 @@ def _sign_csr(csr_text, ca_folder): csrfile = open(inbound, "w") csrfile.write(csr_text) csrfile.close() - logging.debug(_("Flags path: %s") % ca_folder) + LOG.debug(_("Flags path: %s"), ca_folder) start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) diff --git a/nova/db/api.py b/nova/db/api.py index 127f15478f07..a4d26ec85f72 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -27,6 +27,9 @@ The underlying driver is loaded as a :class:`LazyPluggable`. :sql_connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/nova/nova.sqlite`. 
+ +:enable_new_services: when adding a new service to the database, is it in the + pool of available hardware (Default: True) """ from nova import exception @@ -37,6 +40,8 @@ from nova import utils FLAGS = flags.FLAGS flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') +flags.DEFINE_boolean('enable_new_services', True, + 'Services to be added to the available pool on create') IMPL = utils.LazyPluggable(FLAGS['db_backend'], @@ -76,6 +81,11 @@ def service_get(context, service_id): return IMPL.service_get(context, service_id) +def service_get_all(context): + """Get a list of all services on any machine on any topic of any type""" + return IMPL.service_get_all(context) + + def service_get_all_by_topic(context, topic): """Get all compute services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) @@ -348,9 +358,9 @@ def instance_get_project_vpn(context, project_id): return IMPL.instance_get_project_vpn(context, project_id) -def instance_get_by_internal_id(context, internal_id): - """Get an instance by internal id.""" - return IMPL.instance_get_by_internal_id(context, internal_id) +def instance_get_by_id(context, instance_id): + """Get an instance by id.""" + return IMPL.instance_get_by_id(context, instance_id) def instance_is_vpn(context, instance_id): @@ -714,7 +724,7 @@ def security_group_get_all(context): def security_group_get(context, security_group_id): - """Get security group by its internal id.""" + """Get security group by its id.""" return IMPL.security_group_get(context, security_group_id) @@ -767,6 +777,13 @@ def security_group_rule_get_by_security_group(context, security_group_id): security_group_id) +def security_group_rule_get_by_security_group_grantee(context, + security_group_id): + """Get all rules that grant access to the given security group.""" + return IMPL.security_group_rule_get_by_security_group_grantee(context, + security_group_id) + + def security_group_rule_destroy(context, security_group_rule_id): """Deletes a security group rule.""" return IMPL.security_group_rule_destroy(context, security_group_rule_id) diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index 3288ebd20f5e..501373942006 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -19,6 +19,27 @@ """ SQLAlchemy database backend """ +import time + +from sqlalchemy.exc import OperationalError + +from nova import flags +from nova import log as logging from nova.db.sqlalchemy import models -models.register_models() + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.db.sqlalchemy') + + +for i in xrange(FLAGS.sql_max_retries): + if i > 0: + time.sleep(FLAGS.sql_retry_interval) + + try: + models.register_models() + break + except OperationalError: + LOG.exception(_("Data store %s is unreachable." + " Trying again in %d seconds."), + FLAGS.sql_connection, FLAGS.sql_retry_interval) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 8e68d12a463c..e475b4d8cba7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,7 +19,6 @@ Implementation of SQLAlchemy backend. 
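The retry loop added to nova/db/sqlalchemy/__init__.py above reduces to a
generic retry-on-OperationalError pattern. A sketch, with hypothetical
defaults standing in for FLAGS.sql_max_retries and FLAGS.sql_retry_interval:

    import time

    from sqlalchemy.exc import OperationalError

    def call_with_retries(fn, max_retries=12, interval=10):
        """Call fn(), sleeping and retrying on OperationalError."""
        for attempt in xrange(max_retries):
            if attempt > 0:
                time.sleep(interval)
            try:
                return fn()
            except OperationalError:
                if attempt == max_retries - 1:
                    raise
                # the real loop logs the unreachable data store and retries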
""" -import random import warnings from nova import db @@ -135,6 +134,18 @@ def service_get(context, service_id, session=None): return result +@require_admin_context +def service_get_all(context, session=None): + if not session: + session = get_session() + + result = session.query(models.Service).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + return result + + @require_admin_context def service_get_all_by_topic(context, topic): session = get_session() @@ -236,6 +247,8 @@ def service_get_by_args(context, host, binary): def service_create(context, values): service_ref = models.Service() service_ref.update(values) + if not FLAGS.enable_new_services: + service_ref.disabled = True service_ref.save() return service_ref @@ -604,30 +617,18 @@ def fixed_ip_update(context, address, values): ################### -#TODO(gundlach): instance_create and volume_create are nearly identical -#and should be refactored. I expect there are other copy-and-paste -#functions between the two of them as well. - - @require_context def instance_create(context, values): """Create a new Instance record in the database. context - request context object values - dict containing column values. - 'internal_id' is auto-generated and should not be specified. """ instance_ref = models.Instance() instance_ref.update(values) session = get_session() with session.begin(): - while instance_ref.internal_id == None: - # Instances have integer internal ids. - internal_id = random.randint(0, 2 ** 31 - 1) - if not instance_internal_id_exists(context, internal_id, - session=session): - instance_ref.internal_id = internal_id instance_ref.save(session=session) return instance_ref @@ -661,7 +662,7 @@ def instance_get(context, instance_id, session=None): if is_admin_context(context): result = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload('security_groups')).\ + options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ @@ -669,7 +670,7 @@ def instance_get(context, instance_id, session=None): elif is_user_context(context): result = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload('security_groups')).\ + options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ @@ -749,37 +750,28 @@ def instance_get_project_vpn(context, project_id): @require_context -def instance_get_by_internal_id(context, internal_id): +def instance_get_by_id(context, instance_id): session = get_session() if is_admin_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ - filter_by(internal_id=internal_id).\ + filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ filter_by(project_id=context.project_id).\ - filter_by(internal_id=internal_id).\ + filter_by(id=instance_id).\ filter_by(deleted=False).\ first() if not result: - raise exception.NotFound(_('Instance %s not found') % (internal_id)) + raise exception.NotFound(_('Instance %s not found') % (instance_id)) return result -@require_context -def instance_internal_id_exists(context, internal_id, session=None): - if not session: - session = get_session() - return session.query(exists().\ - 
where(models.Instance.internal_id == internal_id)).\ - one()[0] - - @require_context def instance_get_fixed_address(context, instance_id): session = get_session() @@ -860,12 +852,9 @@ def instance_action_create(context, values): def instance_get_actions(context, instance_id): """Return the actions associated to the given instance id""" session = get_session() - actions = {} - for action in session.query(models.InstanceActions).\ + return session.query(models.InstanceActions).\ filter_by(instance_id=instance_id).\ - all(): - actions[action.action] = action.error - return actions + all() ################### @@ -1315,10 +1304,6 @@ def volume_create(context, values): session = get_session() with session.begin(): - while volume_ref.ec2_id == None: - ec2_id = utils.generate_uid('vol') - if not volume_ec2_id_exists(context, ec2_id, session=session): - volume_ref.ec2_id = ec2_id volume_ref.save(session=session) return volume_ref @@ -1416,41 +1401,6 @@ def volume_get_all_by_project(context, project_id): all() -@require_context -def volume_get_by_ec2_id(context, ec2_id): - session = get_session() - result = None - - if is_admin_context(context): - result = session.query(models.Volume).\ - filter_by(ec2_id=ec2_id).\ - filter_by(deleted=can_read_deleted(context)).\ - first() - elif is_user_context(context): - result = session.query(models.Volume).\ - filter_by(project_id=context.project_id).\ - filter_by(ec2_id=ec2_id).\ - filter_by(deleted=False).\ - first() - else: - raise exception.NotAuthorized() - - if not result: - raise exception.NotFound(_('Volume %s not found') % ec2_id) - - return result - - -@require_context -def volume_ec2_id_exists(context, ec2_id, session=None): - if not session: - session = get_session() - - return session.query(exists().\ - where(models.Volume.id == ec2_id)).\ - one()[0] - - @require_admin_context def volume_get_instance(context, volume_id): session = get_session() @@ -1640,6 +1590,44 @@ def security_group_rule_get(context, security_group_rule_id, session=None): return result +@require_context +def security_group_rule_get_by_security_group(context, security_group_id, + session=None): + if not session: + session = get_session() + if is_admin_context(context): + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(parent_group_id=security_group_id).\ + all() + else: + # TODO(vish): Join to group and check for project_id + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=False).\ + filter_by(parent_group_id=security_group_id).\ + all() + return result + + +@require_context +def security_group_rule_get_by_security_group_grantee(context, + security_group_id, + session=None): + if not session: + session = get_session() + if is_admin_context(context): + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(group_id=security_group_id).\ + all() + else: + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=False).\ + filter_by(group_id=security_group_id).\ + all() + return result + + @require_context def security_group_rule_create(context, values): security_group_rule_ref = models.SecurityGroupIngressRule() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index a050aef23643..1ed366127e65 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -164,11 +164,13 @@ class Certificate(BASE, NovaBase): class Instance(BASE, NovaBase): """Represents a guest 
vm.""" __tablename__ = 'instances' - id = Column(Integer, primary_key=True) - internal_id = Column(Integer, unique=True) + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + return "instance-%08x" % self.id admin_pass = Column(String(255)) - user_id = Column(String(255)) project_id = Column(String(255)) @@ -180,10 +182,6 @@ class Instance(BASE, NovaBase): def project(self): return auth.manager.AuthManager().get_project(self.project_id) - @property - def name(self): - return "instance-%d" % self.internal_id - image_id = Column(String(255)) kernel_id = Column(String(255)) ramdisk_id = Column(String(255)) @@ -220,10 +218,14 @@ class Instance(BASE, NovaBase): launched_at = Column(DateTime) terminated_at = Column(DateTime) + availability_zone = Column(String(255)) + # User editable field for display in user-facing UIs display_name = Column(String(255)) display_description = Column(String(255)) + locked = Column(Boolean) + # TODO(vish): see Ewan's email about state improvements, probably # should be in a driver base class or some such # vmstate_state = running, halted, suspended, paused @@ -249,8 +251,11 @@ class InstanceActions(BASE, NovaBase): class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' - id = Column(Integer, primary_key=True) - ec2_id = Column(String(12), unique=True) + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + return "volume-%08x" % self.id user_id = Column(String(255)) project_id = Column(String(255)) @@ -276,10 +281,6 @@ class Volume(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) - @property - def name(self): - return self.ec2_id - class Quota(BASE, NovaBase): """Represents quota overrides for a project.""" @@ -543,7 +544,8 @@ def register_models(): """Register Models and create metadata. Called from nova.db.sqlalchemy.__init__ as part of loading the driver, - it will never need to be called explicitly elsewhere. + it will never need to be called explicitly elsewhere unless the + connection is lost and needs to be reestablished. """ from sqlalchemy import create_engine models = (Service, Instance, InstanceActions, diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index e0d84c1075fd..c3876c02a3cb 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -36,7 +36,9 @@ def get_session(autocommit=True, expire_on_commit=False): global _MAKER if not _MAKER: if not _ENGINE: - _ENGINE = create_engine(FLAGS.sql_connection, echo=False) + _ENGINE = create_engine(FLAGS.sql_connection, + pool_recycle=FLAGS.sql_idle_timeout, + echo=False) _MAKER = (sessionmaker(bind=_ENGINE, autocommit=autocommit, expire_on_commit=expire_on_commit)) diff --git a/nova/exception.py b/nova/exception.py index 277033e0f324..7680e534adee 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -21,9 +21,8 @@ Nova base exception handling, including decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. 
""" -import logging -import sys -import traceback +from nova import log as logging +LOG = logging.getLogger('nova.exception') class ProcessExecutionError(IOError): @@ -84,7 +83,7 @@ def wrap_exception(f): except Exception, e: if not isinstance(e, Error): #exc_type, exc_value, exc_traceback = sys.exc_info() - logging.exception(_('Uncaught exception')) + LOG.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) raise diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 79d8b894d778..7c2d7177be17 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -18,12 +18,16 @@ """Based a bit on the carrot.backeds.queue backend... but a lot better.""" -import logging import Queue as queue from carrot.backends import base from eventlet import greenthread +from nova import log as logging + + +LOG = logging.getLogger("nova.fakerabbit") + EXCHANGES = {} QUEUES = {} @@ -41,12 +45,12 @@ class Exchange(object): self._routes = {} def publish(self, message, routing_key=None): - logging.debug(_('(%s) publish (key: %s) %s'), - self.name, routing_key, message) + LOG.debug(_('(%s) publish (key: %s) %s'), + self.name, routing_key, message) routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: - logging.debug(_('Publishing to route %s'), f) + LOG.debug(_('Publishing to route %s'), f) f(message, routing_key=routing_key) def bind(self, callback, routing_key): @@ -76,19 +80,19 @@ class Backend(base.BaseBackend): def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: - logging.debug(_('Declaring queue %s'), queue) + LOG.debug(_('Declaring queue %s'), queue) QUEUES[queue] = Queue(queue) def exchange_declare(self, exchange, type, *args, **kwargs): global EXCHANGES if exchange not in EXCHANGES: - logging.debug(_('Declaring exchange %s'), exchange) + LOG.debug(_('Declaring exchange %s'), exchange) EXCHANGES[exchange] = Exchange(exchange, type) def queue_bind(self, queue, exchange, routing_key, **kwargs): global EXCHANGES global QUEUES - logging.debug(_('Binding %s to %s with key %s'), + LOG.debug(_('Binding %s to %s with key %s'), queue, exchange, routing_key) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) @@ -113,7 +117,7 @@ class Backend(base.BaseBackend): content_type=content_type, content_encoding=content_encoding) message.result = True - logging.debug(_('Getting from %s: %s'), queue, message) + LOG.debug(_('Getting from %s: %s'), queue, message) return message def prepare_message(self, message_data, delivery_mode, diff --git a/nova/flags.py b/nova/flags.py index 6eb0da3ecdd9..42f177cdc419 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -29,8 +29,6 @@ import sys import gflags -from nova import utils - class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. 
@@ -213,10 +211,10 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') DEFINE_integer('glance_port', 9292, 'glance port') -DEFINE_string('glance_host', utils.get_my_ip(), 'glance host') +DEFINE_string('glance_host', '127.0.0.1', 'glance host') DEFINE_integer('s3_port', 3333, 's3 port') -DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)') -DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)') +DEFINE_string('s3_host', '127.0.0.1', 's3 host (for infrastructure)') +DEFINE_string('s3_dmz', '127.0.0.1', 's3 dmz ip (for instances)') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') @@ -243,8 +241,8 @@ DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_string('ec2_prefix', 'http', 'prefix for ec2') -DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') -DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server') +DEFINE_string('cc_host', '127.0.0.1', 'ip of api server') +DEFINE_string('cc_dmz', '127.0.0.1', 'internal ip of api server') DEFINE_integer('cc_port', 8773, 'cloud controller port') DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2') @@ -270,6 +268,11 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), DEFINE_string('sql_connection', 'sqlite:///$state_path/nova.sqlite', 'connection string for sql database') +DEFINE_string('sql_idle_timeout', + '3600', + 'timeout for idle sql database connections') +DEFINE_integer('sql_max_retries', 12, 'sql connection attempts') +DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval') DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', 'Manager for compute') diff --git a/nova/image/glance.py b/nova/image/glance.py index cc3192e7c85e..a3a2f430815f 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -19,20 +19,17 @@ import httplib import json -import logging import urlparse -import webob.exc - -from nova.compute import api as compute_api -from nova import utils -from nova import flags from nova import exception -import nova.image.service +from nova import flags +from nova import log as logging +from nova.image import service + + +LOG = logging.getLogger('nova.image.glance') FLAGS = flags.FLAGS - - flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1', 'IP address or URL where Glance\'s Teller service resides') flags.DEFINE_string('glance_teller_port', '9191', @@ -78,8 +75,8 @@ class ParallaxClient(object): data = json.loads(res.read())['images'] return data else: - logging.warn(_("Parallax returned HTTP error %d from " - "request for /images"), res.status_int) + LOG.warn(_("Parallax returned HTTP error %d from " + "request for /images"), res.status_int) return [] finally: c.close() @@ -97,8 +94,8 @@ class ParallaxClient(object): data = json.loads(res.read())['images'] return data else: - logging.warn(_("Parallax returned HTTP error %d from " - "request for /images/detail"), res.status_int) + LOG.warn(_("Parallax returned HTTP error %d from " + "request for /images/detail"), res.status_int) return [] finally: c.close() @@ -166,7 +163,7 @@ class 
ParallaxClient(object): c.close() -class GlanceImageService(nova.image.service.BaseImageService): +class GlanceImageService(service.BaseImageService): """Provides storage and retrieval of disk image objects within Glance.""" def __init__(self): diff --git a/nova/log.py b/nova/log.py new file mode 100644 index 000000000000..c1428c051d9d --- /dev/null +++ b/nova/log.py @@ -0,0 +1,254 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Nova logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. + +It also allows setting of formatting information through flags. +""" + + +import cStringIO +import json +import logging +import logging.handlers +import traceback + +from nova import flags +from nova import version + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('logging_context_format_string', + '(%(name)s %(nova_version)s): %(levelname)s ' + '[%(request_id)s %(user)s ' + '%(project)s] %(message)s', + 'format string to use for log messages') + +flags.DEFINE_string('logging_default_format_string', + '(%(name)s %(nova_version)s): %(levelname)s [N/A] ' + '%(message)s', + 'format string to use for log messages') + +flags.DEFINE_string('logging_debug_format_suffix', + 'from %(processName)s (pid=%(process)d) %(funcName)s' + ' %(pathname)s:%(lineno)d', + 'data to append to log format when level is DEBUG') + +flags.DEFINE_string('logging_exception_prefix', + '(%(name)s): TRACE: ', + 'prefix each line of exception output with this format') + +flags.DEFINE_list('default_log_levels', + ['amqplib=WARN', + 'sqlalchemy=WARN', + 'eventlet.wsgi.server=WARN'], + 'list of logger=LEVEL pairs') + +flags.DEFINE_bool('use_syslog', False, 'output to syslog') +flags.DEFINE_string('logfile', None, 'output to named file') + + +# A list of things we want to replicate from logging. +# levels +CRITICAL = logging.CRITICAL +FATAL = logging.FATAL +ERROR = logging.ERROR +WARNING = logging.WARNING +WARN = logging.WARN +INFO = logging.INFO +DEBUG = logging.DEBUG +NOTSET = logging.NOTSET +# methods +getLogger = logging.getLogger +debug = logging.debug +info = logging.info +warning = logging.warning +warn = logging.warn +error = logging.error +exception = logging.exception +critical = logging.critical +log = logging.log +# handlers +StreamHandler = logging.StreamHandler +FileHandler = logging.FileHandler +# logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler. 
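# A sketch of the intended usage, inferred from the callers converted
# elsewhere in this patch: because the module re-exports the stdlib
# logging names, files only swap their import and keep their call sites,
# gaining the context and audit extras.
#
#     from nova import log as logging
#     LOG = logging.getLogger('nova.compute.manager')
#     LOG.debug(_("works like stdlib logging"))
#     LOG.audit(_("plus the new AUDIT level"), context=context)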
+SysLogHandler = logging.handlers.SysLogHandler
+
+
+# our new audit level
+AUDIT = logging.INFO + 1
+logging.addLevelName(AUDIT, 'AUDIT')
+
+
+def _dictify_context(context):
+    if context == None:
+        return None
+    if not isinstance(context, dict) \
+    and getattr(context, 'to_dict', None):
+        context = context.to_dict()
+    return context
+
+
+def basicConfig():
+    logging.basicConfig()
+    for handler in logging.root.handlers:
+        handler.setFormatter(_formatter)
+    if FLAGS.verbose:
+        logging.root.setLevel(logging.DEBUG)
+    if FLAGS.use_syslog:
+        syslog = SysLogHandler(address='/dev/log')
+        syslog.setFormatter(_formatter)
+        logging.root.addHandler(syslog)
+    if FLAGS.logfile:
+        logfile = FileHandler(FLAGS.logfile)
+        logfile.setFormatter(_formatter)
+        logging.root.addHandler(logfile)
+
+
+class NovaLogger(logging.Logger):
+    """
+    NovaLogger manages request context and formatting.
+
+    This becomes the class that is instantiated by logging.getLogger.
+    """
+    def __init__(self, name, level=NOTSET):
+        level_name = self._get_level_from_flags(name, FLAGS)
+        level = globals()[level_name]
+        logging.Logger.__init__(self, name, level)
+
+    def _get_level_from_flags(self, name, FLAGS):
+        # if exactly "nova", or a child logger, honor the verbose flag
+        if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
+            return 'DEBUG'
+        for pair in FLAGS.default_log_levels:
+            logger, _sep, level = pair.partition('=')
+            # NOTE(todd): if we set a.b, we want a.b.c to have the same level
+            #             (but not a.bc, so we check the dot)
+            if name == logger:
+                return level
+            if name.startswith(logger) and name[len(logger)] == '.':
+                return level
+        return 'INFO'
+
+    def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
+        """Extract context from any log call"""
+        if not extra:
+            extra = {}
+        if context:
+            extra.update(_dictify_context(context))
+        extra.update({"nova_version": version.version_string_with_vcs()})
+        logging.Logger._log(self, level, msg, args, exc_info, extra)
+
+    def addHandler(self, handler):
+        """Each handler gets our custom formatter"""
+        handler.setFormatter(_formatter)
+        logging.Logger.addHandler(self, handler)
+
+    def audit(self, msg, *args, **kwargs):
+        """Shortcut for our AUDIT level"""
+        if self.isEnabledFor(AUDIT):
+            self._log(AUDIT, msg, args, **kwargs)
+
+    def exception(self, msg, *args, **kwargs):
+        """Logging.exception doesn't handle kwargs, so breaks context"""
+        if not kwargs.get('exc_info'):
+            kwargs['exc_info'] = 1
+        self.error(msg, *args, **kwargs)
+        # NOTE(todd): does this really go here, or in _log ?
+        extra = kwargs.get('extra')
+        if not extra:
+            return
+        env = extra.get('environment')
+        if env:
+            env = env.copy()
+            for k in env.keys():
+                if not isinstance(env[k], str):
+                    env.pop(k)
+            message = "Environment: %s" % json.dumps(env)
+            kwargs.pop('exc_info')
+            self.error(message, **kwargs)
+
+logging.setLoggerClass(NovaLogger)
+
+
+class NovaRootLogger(NovaLogger):
+    pass
+
+if not isinstance(logging.root, NovaRootLogger):
+    logging.root = NovaRootLogger("nova.root", WARNING)
+    NovaLogger.root = logging.root
+    NovaLogger.manager.root = logging.root
+
+
+class NovaFormatter(logging.Formatter):
+    """
+    A nova.context.RequestContext aware formatter configured through flags.
+
+    The flags used to set format strings are: logging_context_format_string
+    and logging_default_format_string.  You can also specify
+    logging_debug_format_suffix to append extra formatting if the log level
+    is debug.
+
+    For information about what variables are available for the formatter see:
+    http://docs.python.org/library/logging.html#formatter
+    """
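# A sketch of the resulting output: with the default
# logging_context_format_string, a context-tagged record renders roughly
# like the line below. The request id, user, project, and version values
# are illustrative; the real version comes from
# version.version_string_with_vcs().
#
#     (nova.network 2011.1-dev): WARNING [req-42 demo demoproject]
#         IP 10.0.0.5 released that was not leased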
+
+    def format(self, record):
+        """Uses context string if request_id is set, otherwise default"""
+        if record.__dict__.get('request_id', None):
+            self._fmt = FLAGS.logging_context_format_string
+        else:
+            self._fmt = FLAGS.logging_default_format_string
+        if record.levelno == logging.DEBUG \
+        and FLAGS.logging_debug_format_suffix:
+            self._fmt += " " + FLAGS.logging_debug_format_suffix
+        # Cache this on the record, Logger will respect our formatted copy
+        if record.exc_info:
+            record.exc_text = self.formatException(record.exc_info, record)
+        return logging.Formatter.format(self, record)
+
+    def formatException(self, exc_info, record=None):
+        """Format exception output with FLAGS.logging_exception_prefix"""
+        if not record:
+            return logging.Formatter.formatException(self, exc_info)
+        stringbuffer = cStringIO.StringIO()
+        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
+                                  None, stringbuffer)
+        lines = stringbuffer.getvalue().split("\n")
+        stringbuffer.close()
+        formatted_lines = []
+        for line in lines:
+            pl = FLAGS.logging_exception_prefix % record.__dict__
+            fl = "%s%s" % (pl, line)
+            formatted_lines.append(fl)
+        return "\n".join(formatted_lines)
+
+_formatter = NovaFormatter()
+
+
+def audit(msg, *args, **kwargs):
+    """Shortcut for logging to root log with severity 'AUDIT'."""
+    if len(logging.root.handlers) == 0:
+        basicConfig()
+    logging.root.log(AUDIT, msg, *args, **kwargs)
diff --git a/nova/network/__init__.py b/nova/network/__init__.py
index dcc54db094a8..6eb3e3ef6cbb 100644
--- a/nova/network/__init__.py
+++ b/nova/network/__init__.py
@@ -16,17 +16,4 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""
-:mod:`nova.network` -- Network Nodes
-=====================================================
-
-.. automodule:: nova.network
-    :platform: Unix
-    :synopsis: Network is responsible for managing networking
-.. moduleauthor:: Jesse Andrews
-.. moduleauthor:: Devin Carlen
-.. moduleauthor:: Vishvananda Ishaya
-.. moduleauthor:: Joshua McKenty
-.. moduleauthor:: Manish Singh
-.. moduleauthor:: Andy Smith
-"""
+from nova.network.api import API
diff --git a/nova/network/api.py b/nova/network/api.py
new file mode 100644
index 000000000000..bf43acb519f5
--- /dev/null
+++ b/nova/network/api.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to networking.
+"""
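Each method in the class below follows the same shape: resolve a host where
one is pinned to the address, then drop a message on the network topic. A
sketch of the payload rpc.cast carries (the topic string and addresses are
illustrative):

    rpc.cast(context,
             'network.host-a',      # from db.queue_get_for(...), or just
                                    # FLAGS.network_topic for any host
             {"method": "associate_floating_ip",
              "args": {"floating_address": "10.1.2.3",
                       "fixed_address": "192.168.0.5"}})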
+""" + +from nova import db +from nova import flags +from nova import log as logging +from nova import quota +from nova import rpc +from nova.db import base + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.network') + + +class API(base.Base): + """API for interacting with the network manager.""" + + def allocate_floating_ip(self, context): + if quota.allowed_floating_ips(context, 1) < 1: + LOG.warn(_("Quota exceeeded for %s, tried to allocate " + "address"), + context.project_id) + raise quota.QuotaError(_("Address quota exceeded. You cannot " + "allocate any more addresses")) + # NOTE(vish): We don't know which network host should get the ip + # when we allocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. + return rpc.call(context, + FLAGS.network_topic, + {"method": "allocate_floating_ip", + "args": {"project_id": context.project_id}}) + + def release_floating_ip(self, context, address): + floating_ip = self.db.floating_ip_get_by_address(context, address) + # NOTE(vish): We don't know which network host should get the ip + # when we deallocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. + rpc.cast(context, + FLAGS.network_topic, + {"method": "deallocate_floating_ip", + "args": {"floating_address": floating_ip['address']}}) + + def associate_floating_ip(self, context, floating_ip, fixed_ip): + if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): + fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) + floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) + # NOTE(vish): Perhaps we should just pass this on to compute and + # let compute communicate with network. + host = fixed_ip['network']['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.network_topic, host), + {"method": "associate_floating_ip", + "args": {"floating_address": floating_ip['address'], + "fixed_address": fixed_ip['address']}}) + + def disassociate_floating_ip(self, context, address): + floating_ip = self.db.floating_ip_get_by_address(context, address) + if not floating_ip.get('fixed_ip'): + raise exception.ApiError('Address is not associated.') + # NOTE(vish): Get the topic from the host name of the network of + # the associated fixed ip. + host = floating_ip['fixed_ip']['network']['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.network_topic, host), + {"method": "disassociate_floating_ip", + "args": {"floating_address": floating_ip['address']}}) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 931a89554683..eba9502e9c92 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -17,16 +17,17 @@ Implements vlans, bridges, and iptables rules using linux utilities. """ -import logging import os -# TODO(ja): does the definition of network_path belong here? 
-
 from nova import db
 from nova import flags
+from nova import log as logging
 from nova import utils
 
 
+LOG = logging.getLogger("nova.linux_net")
+
+
 def _bin_file(script):
     """Return the absolute path to script in the bin directory"""
     return os.path.abspath(os.path.join(__file__, "../../../bin", script))
@@ -172,7 +173,7 @@ def ensure_vlan(vlan_num):
     """Create a vlan unless it already exists"""
     interface = "vlan%s" % vlan_num
     if not _device_exists(interface):
-        logging.debug(_("Starting VLAN inteface %s"), interface)
+        LOG.debug(_("Starting VLAN interface %s"), interface)
         _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
         _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
         _execute("sudo ifconfig %s up" % interface)
@@ -182,7 +183,7 @@ def ensure_vlan(vlan_num):
 def ensure_bridge(bridge, interface, net_attrs=None):
     """Create a bridge unless it already exists"""
     if not _device_exists(bridge):
-        logging.debug(_("Starting Bridge interface for %s"), interface)
+        LOG.debug(_("Starting Bridge interface for %s"), interface)
         _execute("sudo brctl addbr %s" % bridge)
         _execute("sudo brctl setfd %s 0" % bridge)
         # _execute("sudo brctl setageing %s 10" % bridge)
@@ -208,6 +209,8 @@ def ensure_bridge(bridge, interface, net_attrs=None):
     _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
     _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
 
+    _execute("sudo iptables -N nova-local", check_exit_code=False)
+    _confirm_rule("FORWARD", "-j nova-local")
 
 def get_dhcp_hosts(context, network_id):
@@ -248,9 +251,9 @@ def update_dhcp(context, network_id):
             _execute('sudo kill -HUP %d' % pid)
             return
         except Exception as exc:  # pylint: disable-msg=W0703
-            logging.debug(_("Hupping dnsmasq threw %s"), exc)
+            LOG.debug(_("Hupping dnsmasq threw %s"), exc)
     else:
-        logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
+        LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
 
     # FLAGFILE and DNSMASQ_INTERFACE in env
     env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@@ -270,7 +273,7 @@ def _host_dhcp(fixed_ip_ref):
 def _execute(cmd, *args, **kwargs):
     """Wrapper around utils._execute for fake_network"""
     if FLAGS.fake_network:
-        logging.debug("FAKE NET: %s", cmd)
+        LOG.debug("FAKE NET: %s", cmd)
         return "fake", 0
     else:
         return utils.execute(cmd, *args, **kwargs)
@@ -328,7 +331,7 @@ def _stop_dnsmasq(network):
         try:
             _execute('sudo kill -TERM %d' % pid)
         except Exception as exc:  # pylint: disable-msg=W0703
-            logging.debug(_("Killing dnsmasq threw %s"), exc)
+            LOG.debug(_("Killing dnsmasq threw %s"), exc)
 
 
 def _dhcp_file(bridge, kind):
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 16aa8f895951..fd286f2100ec 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -45,7 +45,6 @@ topologies.
All of the network commands are issued to a subclass of """ import datetime -import logging import math import socket @@ -55,11 +54,13 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import utils from nova import rpc +LOG = logging.getLogger("nova.network.manager") FLAGS = flags.FLAGS flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') @@ -131,7 +132,7 @@ class NetworkManager(manager.Manager): def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - logging.debug(_("setting network host")) + LOG.debug(_("setting network host"), context=context) host = self.db.network_set_host(context, network_id, self.host) @@ -186,7 +187,7 @@ class NetworkManager(manager.Manager): def lease_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is leased.""" - logging.debug("Leasing IP %s", address) + LOG.debug(_("Leasing IP %s"), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: @@ -201,12 +202,12 @@ class NetworkManager(manager.Manager): {'leased': True, 'updated_at': now}) if not fixed_ip_ref['allocated']: - logging.warn(_("IP %s leased that was already deallocated"), - address) + LOG.warn(_("IP %s leased that was already deallocated"), address, + context=context) def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" - logging.debug("Releasing IP %s", address) + LOG.debug("Releasing IP %s", address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: @@ -216,7 +217,8 @@ class NetworkManager(manager.Manager): raise exception.Error(_("IP %s released from bad mac %s vs %s") % (address, instance_ref['mac_address'], mac)) if not fixed_ip_ref['leased']: - logging.warn(_("IP %s released that was not leased"), address) + LOG.warn(_("IP %s released that was not leased"), address, + context=context) self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': False}) @@ -437,7 +439,7 @@ class VlanManager(NetworkManager): self.host, time) if num: - logging.debug(_("Dissassociated %s stale fixed ip(s)"), num) + LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) def init_host(self): """Do any initialization that needs to be run if this is a diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index 52257f69fce1..bc26fd3c551f 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -39,7 +39,6 @@ S3 client with this module:: import datetime import json -import logging import multiprocessing import os import urllib @@ -54,12 +53,14 @@ from twisted.web import static from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager from nova.objectstore import bucket from nova.objectstore import image +LOG = logging.getLogger('nova.objectstore.handler') FLAGS = flags.FLAGS flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.') @@ -132,9 +133,11 @@ def get_context(request): request.uri, headers=request.getAllHeaders(), check_type='s3') - return context.RequestContext(user, project) + rv = context.RequestContext(user, project) + LOG.audit(_("Authenticated request"), context=rv) + return rv except exception.Error as ex: - 
logging.debug(_("Authentication Failure: %s"), ex) + LOG.debug(_("Authentication Failure: %s"), ex) raise exception.NotAuthorized() @@ -176,7 +179,7 @@ class S3(ErrorHandlingResource): def render_GET(self, request): # pylint: disable-msg=R0201 """Renders the GET request for a list of buckets as XML""" - logging.debug('List of buckets requested') + LOG.debug(_('List of buckets requested'), context=request.context) buckets = [b for b in bucket.Bucket.all() \ if b.is_authorized(request.context)] @@ -203,7 +206,7 @@ class BucketResource(ErrorHandlingResource): def render_GET(self, request): "Returns the keys for the bucket resource""" - logging.debug("List keys for bucket %s", self.name) + LOG.debug(_("List keys for bucket %s"), self.name) try: bucket_object = bucket.Bucket(self.name) @@ -211,6 +214,8 @@ class BucketResource(ErrorHandlingResource): return error.NoResource(message="No such bucket").render(request) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to access bucket %s"), + self.name, context=request.context) raise exception.NotAuthorized() prefix = get_argument(request, "prefix", u"") @@ -227,8 +232,8 @@ class BucketResource(ErrorHandlingResource): def render_PUT(self, request): "Creates the bucket resource""" - logging.debug(_("Creating bucket %s"), self.name) - logging.debug("calling bucket.Bucket.create(%r, %r)", + LOG.debug(_("Creating bucket %s"), self.name) + LOG.debug("calling bucket.Bucket.create(%r, %r)", self.name, request.context) bucket.Bucket.create(self.name, request.context) @@ -237,10 +242,12 @@ class BucketResource(ErrorHandlingResource): def render_DELETE(self, request): """Deletes the bucket resource""" - logging.debug(_("Deleting bucket %s"), self.name) + LOG.debug(_("Deleting bucket %s"), self.name) bucket_object = bucket.Bucket(self.name) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to delete bucket %s"), + self.name, context=request.context) raise exception.NotAuthorized() bucket_object.delete() @@ -261,11 +268,12 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug(_("Getting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to get object %s from bucket " + "%s"), self.name, self.bucket.name, + context=request.context) raise exception.NotAuthorized() obj = self.bucket[urllib.unquote(self.name)] @@ -281,11 +289,12 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug(_("Putting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to upload object %s to bucket " + "%s"), + self.name, self.bucket.name, context=request.context) raise exception.NotAuthorized() key = urllib.unquote(self.name) @@ -302,11 +311,13 @@ class ObjectResource(ErrorHandlingResource): authorized to delete the object. 
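The handler methods in this file all share one authorization idiom; a
condensed sketch of it (the audit message text is illustrative):

    if not self.bucket.is_authorized(request.context):
        LOG.audit(_("Unauthorized attempt to upload object %s to bucket "
                    "%s"), self.name, self.bucket.name,
                  context=request.context)
        raise exception.NotAuthorized()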
""" - logging.debug(_("Deleting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name, + context=request.context) if not self.bucket.is_authorized(request.context): + LOG.audit("Unauthorized attempt to delete object %s from " + "bucket %s", self.name, self.bucket.name, + context=request.context) raise exception.NotAuthorized() del self.bucket[urllib.unquote(self.name)] @@ -379,13 +390,21 @@ class ImagesResource(resource.Resource): image_path = os.path.join(FLAGS.images_path, image_id) if not image_path.startswith(FLAGS.images_path) or \ os.path.exists(image_path): + LOG.audit(_("Not authorized to upload image: invalid directory " + "%s"), + image_path, context=request.context) raise exception.NotAuthorized() bucket_object = bucket.Bucket(image_location.split("/")[0]) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Not authorized to upload image: unauthorized " + "bucket %s"), bucket_object.name, + context=request.context) raise exception.NotAuthorized() + LOG.audit(_("Starting image upload: %s"), image_id, + context=request.context) p = multiprocessing.Process(target=image.Image.register_aws_image, args=(image_id, image_location, request.context)) p.start() @@ -398,17 +417,21 @@ class ImagesResource(resource.Resource): image_id = get_argument(request, 'image_id', u'') image_object = image.Image(image_id) if not image_object.is_authorized(request.context): - logging.debug(_("not authorized for render_POST in images")) + LOG.audit(_("Not authorized to update attributes of image %s"), + image_id, context=request.context) raise exception.NotAuthorized() operation = get_argument(request, 'operation', u'') if operation: # operation implies publicity toggle - logging.debug(_("handling publicity toggle")) - image_object.set_public(operation == 'add') + newstatus = (operation == 'add') + LOG.audit(_("Toggling publicity flag of image %s %r"), image_id, + newstatus, context=request.context) + image_object.set_public(newstatus) else: # other attributes imply update - logging.debug(_("update user fields")) + LOG.audit(_("Updating user fields on image %s"), image_id, + context=request.context) clean_args = {} for arg in request.args.keys(): clean_args[arg] = request.args[arg][0] @@ -421,9 +444,12 @@ class ImagesResource(resource.Resource): image_object = image.Image(image_id) if not image_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to delete image %s"), + image_id, context=request.context) raise exception.NotAuthorized() image_object.delete() + LOG.audit(_("Deleted image: %s"), image_id, context=request.context) request.setResponseCode(204) return '' diff --git a/nova/quota.py b/nova/quota.py index f6ca9f77c733..3884eb3081a0 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -22,7 +22,6 @@ Quotas for instances, volumes, and floating ips from nova import db from nova import exception from nova import flags -from nova.compute import instance_types FLAGS = flags.FLAGS @@ -63,10 +62,9 @@ def allowed_instances(context, num_instances, instance_type): quota = get_quota(context, project_id) allowed_instances = quota['instances'] - used_instances allowed_cores = quota['cores'] - used_cores - type_cores = instance_types.INSTANCE_TYPES[instance_type]['vcpus'] - num_cores = num_instances * type_cores + num_cores = num_instances * instance_type['vcpus'] allowed_instances = min(allowed_instances, - int(allowed_cores // type_cores)) + int(allowed_cores // instance_type['vcpus'])) return 
min(num_instances, allowed_instances) diff --git a/nova/rpc.py b/nova/rpc.py index 844088348633..49b11602bdeb 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -22,7 +22,6 @@ No fan-out support yet. """ import json -import logging import sys import time import traceback @@ -36,13 +35,12 @@ from nova import context from nova import exception from nova import fakerabbit from nova import flags +from nova import log as logging from nova import utils FLAGS = flags.FLAGS - -LOG = logging.getLogger('amqplib') -LOG.setLevel(logging.DEBUG) +LOG = logging.getLogger('nova.rpc') class Connection(carrot_connection.BrokerConnection): @@ -91,15 +89,16 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - logging.exception(_("AMQP server on %s:%d is unreachable." - " Trying again in %d seconds.") % ( - FLAGS.rabbit_host, - FLAGS.rabbit_port, - FLAGS.rabbit_retry_interval)) + LOG.exception(_("AMQP server on %s:%d is unreachable." + " Trying again in %d seconds.") % ( + FLAGS.rabbit_host, + FLAGS.rabbit_port, + FLAGS.rabbit_retry_interval)) self.failed_connection = True if self.failed_connection: - logging.exception(_("Unable to connect to AMQP server" - " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries) + LOG.exception(_("Unable to connect to AMQP server " + "after %d tries. Shutting down."), + FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -116,14 +115,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - logging.error(_("Reconnected to queue")) + LOG.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. 
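# The flag-guarded logging below reduces to this idiom: log on the first
# failure, log again on recovery, and stay quiet in between so a
# persistent outage does not flood the logs.  A sketch:
#
#     try:
#         do_fetch()
#         if self.failed_connection:
#             LOG.error(_("Reconnected to queue"))
#             self.failed_connection = False
#     except Exception:
#         if not self.failed_connection:
#             LOG.exception(_("Failed to fetch message from queue"))
#             self.failed_connection = True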
except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: - logging.exception(_("Failed to fetch message from queue")) + LOG.exception(_("Failed to fetch message from queue")) self.failed_connection = True def attach_to_eventlet(self): @@ -193,6 +192,7 @@ class AdapterConsumer(TopicConsumer): if msg_id: msg_reply(msg_id, rval, None) except Exception as e: + logging.exception("Exception during message handling") if msg_id: msg_reply(msg_id, None, sys.exc_info()) return @@ -242,8 +242,8 @@ def msg_reply(msg_id, reply=None, failure=None): if failure: message = str(failure[1]) tb = traceback.format_exception(*failure) - logging.error(_("Returning exception %s to caller"), message) - logging.error(tb) + LOG.error(_("Returning exception %s to caller"), message) + LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) conn = Connection.instance(True) publisher = DirectPublisher(connection=conn, msg_id=msg_id) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 08d7033f5ecf..66e46c1b9b5a 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -37,6 +37,11 @@ class NoValidHost(exception.Error): pass +class WillNotSchedule(exception.Error): + """The specified host is not up or doesn't exist.""" + pass + + class Scheduler(object): """The base class that all Scheduler clases should inherit from.""" diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 44e21f2fdda7..a4d6dd574e8a 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -21,15 +21,16 @@ Scheduler Service """ -import logging import functools from nova import db from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils +LOG = logging.getLogger('nova.scheduler.manager') FLAGS = flags.FLAGS flags.DEFINE_string('scheduler_driver', 'nova.scheduler.chance.ChanceScheduler', @@ -65,4 +66,4 @@ class SchedulerManager(manager.Manager): db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) - logging.debug(_("Casting to %s %s for %s"), topic, host, method) + LOG.debug(_("Casting to %s %s for %s"), topic, host, method) diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index f9171ab35d4d..47baf0d737b4 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler): def schedule_run_instance(self, context, instance_id, *_args, **_kwargs): """Picks a host that is up and has the fewest running instances.""" instance_ref = db.instance_get(context, instance_id) + if instance_ref['availability_zone'] and context.is_admin: + zone, _x, host = instance_ref['availability_zone'].partition(':') + service = db.service_get_by_args(context.elevated(), host, + 'nova-compute') + if not self.service_is_up(service): + raise driver.WillNotSchedule("Host %s is not alive" % host) + + # TODO(vish): this probably belongs in the manager, if we + # can generalize this somehow + now = datetime.datetime.utcnow() + db.instance_update(context, instance_id, {'host': host, + 'scheduled_at': now}) + return host results = db.service_get_all_compute_sorted(context) for result in results: (service, instance_cores) = result @@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler): def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" volume_ref = db.volume_get(context, volume_id) + if (':' in volume_ref['availability_zone']) and 
context.is_admin: + zone, _x, host = volume_ref['availability_zone'].partition(':') + service = db.service_get_by_args(context.elevated(), host, + 'nova-volume') + if not self.service_is_up(service): + raise driver.WillNotSchedule("Host %s not available" % host) + + # TODO(vish): this probably belongs in the manager, if we + # can generalize this somehow + now = datetime.datetime.utcnow() + db.volume_update(context, volume_id, {'host': host, + 'scheduled_at': now}) + return host results = db.service_get_all_volume_sorted(context) for result in results: (service, volume_gigabytes) = result diff --git a/nova/service.py b/nova/service.py index f1f90742f845..523c1a8d7389 100644 --- a/nova/service.py +++ b/nova/service.py @@ -21,17 +21,20 @@ Generic Node baseclass for all workers that run on hosts """ import inspect -import logging import os import sys +import time from eventlet import event from eventlet import greenthread from eventlet import greenpool +from sqlalchemy.exc import OperationalError + from nova import context from nova import db from nova import exception +from nova import log as logging from nova import flags from nova import rpc from nova import utils @@ -151,7 +154,7 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - logging.warn(_("Starting %s node"), topic) + logging.audit(_("Starting %s node"), topic) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) @@ -204,22 +207,29 @@ class Service(object): self.model_disconnected = True logging.exception(_("model server went away")) + try: + # NOTE(vish): This is late-loaded to make sure that the + # database is not created before flags have + # been loaded. + from nova.db.sqlalchemy import models + models.register_models() + except OperationalError: + logging.exception(_("Data store %s is unreachable." 
+ " Trying again in %d seconds.") % + (FLAGS.sql_connection, + FLAGS.sql_retry_interval)) + time.sleep(FLAGS.sql_retry_interval) + def serve(*services): - argv = FLAGS(sys.argv) + FLAGS(sys.argv) + logging.basicConfig() if not services: services = [Service.create()] name = '_'.join(x.binary for x in services) - logging.debug("Serving %s" % name) - - logging.getLogger('amqplib').setLevel(logging.WARN) - - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - else: - logging.getLogger().setLevel(logging.WARNING) + logging.debug(_("Serving %s"), name) logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 961431154e49..291a0e46819d 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -113,7 +113,7 @@ def stub_out_networking(stubs): def stub_out_compute_api_snapshot(stubs): def snapshot(self, context, instance_id, name): return 123 - stubs.Set(nova.compute.api.ComputeAPI, 'snapshot', snapshot) + stubs.Set(nova.compute.API, 'snapshot', snapshot) def stub_out_glance(stubs, initial_fixtures=[]): diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 0f274bd153a5..f5be9c94fb67 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,7 +22,6 @@ and as a WSGI layer import json import datetime -import logging import unittest import stubout diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 70ff714e61db..6e611a55d4ae 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -56,8 +56,8 @@ def instance_address(context, instance_id): def stub_instance(id, user_id=1): - return Instance(id=int(id) + 123456, state=0, image_id=10, user_id=user_id, - display_name='server%s' % id, internal_id=id) + return Instance(id=id, state=0, image_id=10, user_id=user_id, + display_name='server%s' % id) def fake_compute_api(cls, req, id): @@ -76,8 +76,7 @@ class ServersTest(unittest.TestCase): fakes.stub_out_key_pair_funcs(self.stubs) fakes.stub_out_image_service(self.stubs) self.stubs.Set(nova.db.api, 'instance_get_all', return_servers) - self.stubs.Set(nova.db.api, 'instance_get_by_internal_id', - return_server) + self.stubs.Set(nova.db.api, 'instance_get_by_id', return_server) self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers) self.stubs.Set(nova.db.api, 'instance_add_security_group', @@ -87,18 +86,12 @@ class ServersTest(unittest.TestCase): instance_address) self.stubs.Set(nova.db.api, 'instance_get_floating_address', instance_address) - self.stubs.Set(nova.compute.api.ComputeAPI, 'pause', - fake_compute_api) - self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause', - fake_compute_api) - self.stubs.Set(nova.compute.api.ComputeAPI, 'suspend', - fake_compute_api) - self.stubs.Set(nova.compute.api.ComputeAPI, 'resume', - fake_compute_api) - self.stubs.Set(nova.compute.api.ComputeAPI, "get_diagnostics", - fake_compute_api) - self.stubs.Set(nova.compute.api.ComputeAPI, "get_actions", - fake_compute_api) + self.stubs.Set(nova.compute.API, 'pause', fake_compute_api) + self.stubs.Set(nova.compute.API, 'unpause', fake_compute_api) + self.stubs.Set(nova.compute.API, 'suspend', fake_compute_api) + self.stubs.Set(nova.compute.API, 'resume', fake_compute_api) + self.stubs.Set(nova.compute.API, "get_diagnostics", fake_compute_api) + self.stubs.Set(nova.compute.API, "get_actions", 
fake_compute_api) self.allow_admin = FLAGS.allow_admin_api def tearDown(self): @@ -109,7 +102,7 @@ class ServersTest(unittest.TestCase): req = webob.Request.blank('/v1.0/servers/1') res = req.get_response(nova.api.API('os')) res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['id'], '1') self.assertEqual(res_dict['server']['name'], 'server1') def test_get_server_list(self): @@ -126,7 +119,7 @@ class ServersTest(unittest.TestCase): def test_create_instance(self): def instance_create(context, inst): - return {'id': 1, 'internal_id': 1, 'display_name': ''} + return {'id': '1', 'display_name': ''} def server_update(context, id, params): return instance_create(context, id) diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py new file mode 100644 index 000000000000..3980ae3cb2f9 --- /dev/null +++ b/nova/tests/hyperv_unittest.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 Cloud.com, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Hyper-V driver +""" + +import random + +from nova import context +from nova import db +from nova import flags +from nova import test +from nova.auth import manager +from nova.virt import hyperv + +FLAGS = flags.FLAGS +FLAGS.connection_type = 'hyperv' + + +class HyperVTestCase(test.TestCase): + """Test cases for the Hyper-V driver""" + def setUp(self): + super(HyperVTestCase, self).setUp() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.RequestContext(self.user, self.project) + + def test_create_destroy(self): + """Create a VM and destroy it""" + instance = {'internal_id': random.randint(1, 1000000), + 'memory_mb': '1024', + 'mac_address': '02:12:34:46:56:67', + 'vcpus': 2, + 'project_id': 'fake', + 'instance_type': 'm1.small'} + instance_ref = db.instance_create(self.context, instance) + + conn = hyperv.get_connection(False) + conn._create_vm(instance_ref) # pylint: disable-msg=W0212 + found = [n for n in conn.list_instances() + if n == instance_ref['name']] + self.assertTrue(len(found) == 1) + info = conn.get_info(instance_ref['name']) + #Unfortunately since the vm is not running at this point, + #we cannot obtain memory information from get_info + self.assertEquals(info['num_cpu'], instance_ref['vcpus']) + + conn.destroy(instance_ref) + found = [n for n in conn.list_instances() + if n == instance_ref['name']] + self.assertTrue(len(found) == 0) + + def tearDown(self): + super(HyperVTestCase, self).tearDown() + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index ceac17adb6c6..da86e6e117bd 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -23,7 +23,6 @@ Unittets for S3 objectstore clone. 
import boto import glob import hashlib -import logging import os import shutil import tempfile @@ -63,7 +62,6 @@ class ObjectStoreTestCase(test.TestCase): self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), images_path=os.path.join(OSS_TEMPDIR, 'images'), ca_path=os.path.join(os.path.dirname(__file__), 'CA')) - logging.getLogger().setLevel(logging.DEBUG) self.auth_manager = manager.AuthManager() self.auth_manager.create_user('user1') diff --git a/nova/tests/test_access.py b/nova/tests/test_access.py index 58fdea3b5c3e..0929903cf054 100644 --- a/nova/tests/test_access.py +++ b/nova/tests/test_access.py @@ -17,7 +17,6 @@ # under the License. import unittest -import logging import webob from nova import context diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index 15d40bc5374a..35ffffb67f9e 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -16,17 +16,18 @@ # License for the specific language governing permissions and limitations # under the License. -import logging from M2Crypto import X509 import unittest from nova import crypto from nova import flags +from nova import log as logging from nova import test from nova.auth import manager from nova.api.ec2 import cloud FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.auth_unittest') class user_generator(object): @@ -211,12 +212,12 @@ class AuthManagerTestCase(object): # NOTE(vish): Setup runs genroot.sh if it hasn't been run cloud.CloudController().setup() _key, cert_str = crypto.generate_x509_cert(user.id, project.id) - logging.debug(cert_str) + LOG.debug(cert_str) full_chain = crypto.fetch_ca(project_id=project.id, chain=True) int_cert = crypto.fetch_ca(project_id=project.id, chain=False) cloud_cert = crypto.fetch_ca() - logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) + LOG.debug("CA chain:\n\n =====\n%s\n\n=====", full_chain) signed_cert = X509.load_cert_string(cert_str) chain_cert = X509.load_cert_string(full_chain) int_cert = X509.load_cert_string(int_cert) @@ -331,7 +332,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): test.TestCase.__init__(self, *args, **kwargs) import nova.auth.fakeldap as fakeldap if FLAGS.flush_db: - logging.info("Flushing datastore") + LOG.info("Flushing datastore") r = fakeldap.Store.instance() r.flushdb() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 8e33912266a3..76a620406cb3 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -18,7 +18,6 @@ from base64 import b64decode import json -import logging from M2Crypto import BIO from M2Crypto import RSA import os @@ -31,6 +30,7 @@ from nova import context from nova import crypto from nova import db from nova import flags +from nova import log as logging from nova import rpc from nova import service from nova import test @@ -41,6 +41,7 @@ from nova.objectstore import image FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.cloud') # Temp dirs for working with image attributes through the cloud controller # (stole this from objectstore_unittest.py) @@ -56,7 +57,6 @@ class CloudTestCase(test.TestCase): images_path=IMAGES_PATH) self.conn = rpc.Connection.instance() - logging.getLogger().setLevel(logging.DEBUG) # set up our cloud self.cloud = cloud.CloudController() @@ -106,7 +106,7 @@ class CloudTestCase(test.TestCase): self.cloud.allocate_address(self.context) inst = db.instance_create(self.context, {'host': FLAGS.host}) fixed = self.network.allocate_fixed_ip(self.context, inst['id']) - ec2_id = 
cloud.internal_id_to_ec2_id(inst['internal_id']) + ec2_id = cloud.id_to_ec2_id(inst['id']) self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) @@ -127,12 +127,29 @@ class CloudTestCase(test.TestCase): result = self.cloud.describe_volumes(self.context) self.assertEqual(len(result['volumeSet']), 2) result = self.cloud.describe_volumes(self.context, - volume_id=[vol2['ec2_id']]) + volume_id=[vol2['id']]) self.assertEqual(len(result['volumeSet']), 1) - self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id']) + self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['id']) db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) + def test_describe_instances(self): + """Makes sure describe_instances works and filters results.""" + inst1 = db.instance_create(self.context, {'reservation_id': 'a'}) + inst2 = db.instance_create(self.context, {'reservation_id': 'a'}) + result = self.cloud.describe_instances(self.context) + result = result['reservationSet'][0] + self.assertEqual(len(result['instancesSet']), 2) + instance_id = cloud.id_to_ec2_id(inst2['id']) + result = self.cloud.describe_instances(self.context, + instance_id=[instance_id]) + result = result['reservationSet'][0] + self.assertEqual(len(result['instancesSet']), 1) + self.assertEqual(result['instancesSet'][0]['instanceId'], + instance_id) + db.instance_destroy(self.context, inst1['id']) + db.instance_destroy(self.context, inst2['id']) + def test_console_output(self): image_id = FLAGS.default_image instance_type = FLAGS.default_instance_type @@ -140,15 +157,15 @@ class CloudTestCase(test.TestCase): kwargs = {'image_id': image_id, 'instance_type': instance_type, 'max_count': max_count} - rv = yield self.cloud.run_instances(self.context, **kwargs) + rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] - output = yield self.cloud.get_console_output(context=self.context, + output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. greenthread.sleep(0.3) - rv = yield self.cloud.terminate_instances(self.context, [instance_id]) + rv = self.cloud.terminate_instances(self.context, [instance_id]) def test_ajax_console(self): kwargs = {'image_id': image_id } @@ -191,7 +208,7 @@ class CloudTestCase(test.TestCase): def test_run_instances(self): if FLAGS.connection_type == 'fake': - logging.debug("Can't test instances without a real virtual env.") + LOG.debug(_("Can't test instances without a real virtual env.")) return image_id = FLAGS.default_image instance_type = FLAGS.default_instance_type @@ -199,30 +216,30 @@ class CloudTestCase(test.TestCase): kwargs = {'image_id': image_id, 'instance_type': instance_type, 'max_count': max_count} - rv = yield self.cloud.run_instances(self.context, **kwargs) + rv = self.cloud.run_instances(self.context, **kwargs) # TODO: check for proper response instance_id = rv['reservationSet'][0].keys()[0] instance = rv['reservationSet'][0][instance_id][0] - logging.debug("Need to watch instance %s until it's running..." 
% - instance['instance_id']) + LOG.debug(_("Need to watch instance %s until it's running..."), + instance['instance_id']) while True: greenthread.sleep(1) info = self.cloud._get_instance(instance['instance_id']) - logging.debug(info['state']) + LOG.debug(info['state']) if info['state'] == power_state.RUNNING: break self.assert_(rv) - if connection_type != 'fake': + if FLAGS.connection_type != 'fake': time.sleep(45) # Should use boto for polling here for reservations in rv['reservationSet']: # for res_id in reservations.keys(): - # logging.debug(reservations[res_id]) + # LOG.debug(reservations[res_id]) # for instance in reservations[res_id]: for instance in reservations[reservations.keys()[0]]: instance_id = instance['instance_id'] - logging.debug("Terminating instance %s" % instance_id) - rv = yield self.compute.terminate_instance(instance_id) + LOG.debug(_("Terminating instance %s"), instance_id) + rv = self.compute.terminate_instance(instance_id) def test_instance_update_state(self): def instance(num): @@ -309,7 +326,7 @@ class CloudTestCase(test.TestCase): def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) - ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) + ec2_id = cloud.id_to_ec2_id(inst['id']) self.cloud.update_instance(self.context, ec2_id, display_name='c00l 1m4g3') inst = db.instance_get(self.context, inst['id']) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index d6cb33fe2070..52660ee7468d 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -20,31 +20,31 @@ Tests For Compute """ import datetime -import logging +from nova import compute from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import test from nova import utils from nova.auth import manager -from nova.compute import api as compute_api FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.compute') class ComputeTestCase(test.TestCase): """Test case for compute""" def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) super(ComputeTestCase, self).setUp() self.flags(connection_type='fake', stub_network=True, network_manager='nova.network.manager.FlatManager') self.compute = utils.import_object(FLAGS.compute_manager) - self.compute_api = compute_api.ComputeAPI() + self.compute_api = compute.API() self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake') @@ -72,7 +72,7 @@ class ComputeTestCase(test.TestCase): """Verify that an instance cannot be created without a display_name.""" cases = [dict(), dict(display_name=None)] for instance in cases: - ref = self.compute_api.create_instances(self.context, + ref = self.compute_api.create(self.context, FLAGS.default_instance_type, None, **instance) try: self.assertNotEqual(ref[0].display_name, None) @@ -80,13 +80,13 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(self.context, ref[0]['id']) def test_create_instance_associates_security_groups(self): - """Make sure create_instances associates security groups""" + """Make sure create associates security groups""" values = {'name': 'default', 'description': 'default', 'user_id': self.user.id, 'project_id': self.project.id} group = db.security_group_create(self.context, values) - ref = self.compute_api.create_instances(self.context, + ref = self.compute_api.create(self.context, FLAGS.default_instance_type, None, 
security_group=['default']) try: self.assertEqual(len(ref[0]['security_groups']), 1) @@ -101,13 +101,13 @@ class ComputeTestCase(test.TestCase): self.compute.run_instance(self.context, instance_id) instances = db.instance_get_all(context.get_admin_context()) - logging.info(_("Running instances: %s"), instances) + LOG.info(_("Running instances: %s"), instances) self.assertEqual(len(instances), 1) self.compute.terminate_instance(self.context, instance_id) instances = db.instance_get_all(context.get_admin_context()) - logging.info(_("After terminating instances: %s"), instances) + LOG.info(_("After terminating instances: %s"), instances) self.assertEqual(len(instances), 0) def test_run_terminate_timestamps(self): @@ -188,3 +188,22 @@ class ComputeTestCase(test.TestCase): self.context, instance_id) self.compute.terminate_instance(self.context, instance_id) + + def test_lock(self): + """ensure locked instance cannot be changed""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + + non_admin_context = context.RequestContext(None, None, False, False) + + # decorator should return False (fail) with locked nonadmin context + self.compute.lock_instance(self.context, instance_id) + ret_val = self.compute.reboot_instance(non_admin_context, instance_id) + self.assertEqual(ret_val, False) + + # decorator should return None (success) with unlocked nonadmin context + self.compute.unlock_instance(self.context, instance_id) + ret_val = self.compute.reboot_instance(non_admin_context, instance_id) + self.assertEqual(ret_val, None) + + self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py new file mode 100644 index 000000000000..beb1d97cf27f --- /dev/null +++ b/nova/tests/test_log.py @@ -0,0 +1,110 @@ +import cStringIO + +from nova import context +from nova import log +from nova import test + + +def _fake_context(): + return context.RequestContext(1, 1) + + +class RootLoggerTestCase(test.TrialTestCase): + def setUp(self): + super(RootLoggerTestCase, self).setUp() + self.log = log.logging.root + + def tearDown(self): + super(RootLoggerTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_is_nova_instance(self): + self.assert_(isinstance(self.log, log.NovaLogger)) + + def test_name_is_nova_root(self): + self.assertEqual("nova.root", self.log.name) + + def test_handlers_have_nova_formatter(self): + formatters = [] + for h in self.log.handlers: + f = h.formatter + if isinstance(f, log.NovaFormatter): + formatters.append(f) + self.assert_(formatters) + self.assertEqual(len(formatters), len(self.log.handlers)) + + def test_handles_context_kwarg(self): + self.log.info("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + + def test_module_level_methods_handle_context_arg(self): + log.info("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + + def test_module_level_audit_handles_context_arg(self): + log.audit("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + + +class NovaFormatterTestCase(test.TrialTestCase): + def setUp(self): + super(NovaFormatterTestCase, self).setUp() + self.flags(logging_context_format_string="HAS CONTEXT "\ + "[%(request_id)s]: %(message)s", + logging_default_format_string="NOCTXT: %(message)s", + logging_debug_format_suffix="--DBG") + self.log = log.logging.root + self.stream = cStringIO.StringIO() + handler = log.StreamHandler(self.stream) + self.log.addHandler(handler) + 
self.log.setLevel(log.DEBUG) + + def tearDown(self): + super(NovaFormatterTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_uncontextualized_log(self): + self.log.info("foo") + self.assertEqual("NOCTXT: foo\n", self.stream.getvalue()) + + def test_contextualized_log(self): + ctxt = _fake_context() + self.log.info("bar", context=ctxt) + expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id + self.assertEqual(expected, self.stream.getvalue()) + + def test_debugging_log(self): + self.log.debug("baz") + self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue()) + + +class NovaLoggerTestCase(test.TrialTestCase): + def setUp(self): + super(NovaLoggerTestCase, self).setUp() + self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False) + self.log = log.getLogger('nova-test') + + def tearDown(self): + super(NovaLoggerTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_has_level_from_flags(self): + self.assertEqual(log.AUDIT, self.log.level) + + def test_child_log_has_level_of_parent_flag(self): + l = log.getLogger('nova-test.foo') + self.assertEqual(log.AUDIT, l.level) + + +class VerboseLoggerTestCase(test.TrialTestCase): + def setUp(self): + super(VerboseLoggerTestCase, self).setUp() + self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True) + self.log = log.getLogger('nova.test') + + def tearDown(self): + super(VerboseLoggerTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self): + self.assertEqual(log.DEBUG, self.log.level) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 96473ac7c4bc..349e20f84150 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -20,18 +20,18 @@ Unit Tests for network code """ import IPy import os -import logging from nova import context from nova import db from nova import exception from nova import flags -from nova import service +from nova import log as logging from nova import test from nova import utils from nova.auth import manager FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.network') class NetworkTestCase(test.TestCase): @@ -45,7 +45,6 @@ class NetworkTestCase(test.TestCase): fake_network=True, network_size=16, num_networks=5) - logging.getLogger().setLevel(logging.DEBUG) self.manager = manager.AuthManager() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] @@ -328,7 +327,7 @@ def lease_ip(private_ip): 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("ISSUE_IP: %s, %s ", out, err) + LOG.debug("ISSUE_IP: %s, %s ", out, err) def release_ip(private_ip): @@ -344,4 +343,4 @@ def release_ip(private_ip): 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("RELEASE_IP: %s, %s ", out, err) + LOG.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 8cf2a5e54652..9548a8c13b22 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -16,17 +16,15 @@ # License for the specific language governing permissions and limitations # under the License. 
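NovaLoggerTestCase and VerboseLoggerTestCase above pin down the semantics of the default_log_levels flag: the flag-derived level is applied eagerly, so a child logger such as 'nova-test.foo' reports the parent's level directly. Stdlib logging answers the same question lazily instead; a minimal contrast sketch using only the standard library (logger names here are illustrative):

    import logging

    # A stdlib child logger stays at NOTSET and getEffectiveLevel() walks up
    # the dotted hierarchy; NovaLogger (per the tests above) instead copies
    # the configured level onto each logger when it is created.
    logging.getLogger('nova-test').setLevel(logging.CRITICAL)

    child = logging.getLogger('nova-test.foo')
    assert child.level == logging.NOTSET
    assert child.getEffectiveLevel() == logging.CRITICAL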
-import logging - from nova import context from nova import db -from nova import exception from nova import flags from nova import quota from nova import test from nova import utils from nova.auth import manager from nova.api.ec2 import cloud +from nova.compute import instance_types FLAGS = flags.FLAGS @@ -34,7 +32,6 @@ FLAGS = flags.FLAGS class QuotaTestCase(test.TestCase): def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) super(QuotaTestCase, self).setUp() self.flags(connection_type='fake', quota_instances=2, @@ -78,14 +75,17 @@ class QuotaTestCase(test.TestCase): def test_quota_overrides(self): """Make sure overriding a projects quotas works""" - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, + instance_types.INSTANCE_TYPES['m1.small']) self.assertEqual(num_instances, 2) db.quota_create(self.context, {'project_id': self.project.id, 'instances': 10}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, + instance_types.INSTANCE_TYPES['m1.small']) self.assertEqual(num_instances, 4) db.quota_update(self.context, self.project.id, {'cores': 100}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, + instance_types.INSTANCE_TYPES['m1.small']) self.assertEqual(num_instances, 10) db.quota_destroy(self.context, self.project.id) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 6ea2edcab348..85593ab46370 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -18,15 +18,16 @@ """ Unit Tests for remote procedure calls using queue """ -import logging from nova import context from nova import flags +from nova import log as logging from nova import rpc from nova import test FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.rpc') class RpcTestCase(test.TestCase): @@ -85,12 +86,12 @@ class RpcTestCase(test.TestCase): @staticmethod def echo(context, queue, value): """Calls echo in the passed queue""" - logging.debug("Nested received %s, %s", queue, value) + LOG.debug(_("Nested received %s, %s"), queue, value) ret = rpc.call(context, queue, {"method": "echo", "args": {"value": value}}) - logging.debug("Nested return %s", ret) + LOG.debug(_("Nested return %s"), ret) return value nested = Nested() @@ -115,13 +116,13 @@ class TestReceiver(object): @staticmethod def echo(context, value): """Simply returns whatever value is sent in""" - logging.debug("Received %s", value) + LOG.debug(_("Received %s"), value) return value @staticmethod def context(context, value): """Returns dictionary version of context""" - logging.debug("Received %s", context) + LOG.debug(_("Received %s"), context) return context.to_dict() @staticmethod diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 78e4a1c77d55..a9937d79743c 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -19,6 +19,8 @@ Tests For Scheduler """ +import datetime + from nova import context from nova import db from nova import flags @@ -95,7 +97,7 @@ class SimpleDriverTestCase(test.TestCase): self.manager.delete_user(self.user) self.manager.delete_project(self.project) - def _create_instance(self): + def _create_instance(self, **kwargs): """Create a test instance""" inst = {} inst['image_id'] = 'ami-test' @@ -106,6 +108,7 @@ class SimpleDriverTestCase(test.TestCase): inst['mac_address'] = utils.generate_mac() 
inst['ami_launch_index'] = 0 inst['vcpus'] = 1 + inst['availability_zone'] = kwargs.get('availability_zone', None) return db.instance_create(self.context, inst)['id'] def _create_volume(self): @@ -114,9 +117,33 @@ class SimpleDriverTestCase(test.TestCase): vol['image_id'] = 'ami-test' vol['reservation_id'] = 'r-fakeres' vol['size'] = 1 + vol['availability_zone'] = 'test' return db.volume_create(self.context, vol)['id'] - def test_hosts_are_up(self): + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): """Ensures driver can find the hosts that are up""" # NOTE(vish): constructing service without create method # because we are going to use it without queue @@ -131,7 +158,7 @@ class SimpleDriverTestCase(test.TestCase): FLAGS.compute_manager) compute2.start() hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(len(hosts), 2) + self.assertEqual(2, len(hosts)) compute1.kill() compute2.kill() @@ -158,6 +185,63 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() compute2.kill() + def test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches in that zone""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_schedule_if_specified_host_is_down(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 =
db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + def test_too_many_cores(self): """Ensures we don't go over max cores""" compute1 = service.Service('host1', diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index b30838ad738c..9f1a181a0a12 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -22,6 +22,8 @@ Unit Tests for remote procedure calls using queue import mox +from nova import context +from nova import db from nova import exception from nova import flags from nova import rpc @@ -72,6 +74,30 @@ class ServiceManagerTestCase(test.TestCase): self.assertEqual(serv.test_method(), 'service') +class ServiceFlagsTestCase(test.TestCase): + def test_service_enabled_on_create_based_on_flag(self): + self.flags(enable_new_services=True) + host = 'foo' + binary = 'nova-fake' + app = service.Service.create(host=host, binary=binary) + app.start() + app.stop() + ref = db.service_get(context.get_admin_context(), app.service_id) + db.service_destroy(context.get_admin_context(), app.service_id) + self.assert_(not ref['disabled']) + + def test_service_disabled_on_create_based_on_flag(self): + self.flags(enable_new_services=False) + host = 'foo' + binary = 'nova-fake' + app = service.Service.create(host=host, binary=binary) + app.start() + app.stop() + ref = db.service_get(context.get_admin_context(), app.service_id) + db.service_destroy(context.get_admin_context(), app.service_id) + self.assert_(ref['disabled']) + + class ServiceTestCase(test.TestCase): """Test cases for Services""" diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 4aa489d08f01..59053f4d0368 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -208,8 +208,99 @@ class LibvirtConnTestCase(test.TestCase): self.manager.delete_user(self.user) -class NWFilterTestCase(test.TestCase): +class IptablesFirewallTestCase(test.TestCase): + def setUp(self): + super(IptablesFirewallTestCase, self).setUp() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.RequestContext('fake', 'fake') + self.network = utils.import_object(FLAGS.network_manager) + self.fw = libvirt_conn.IptablesFirewallDriver() + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(IptablesFirewallTestCase, self).tearDown() + + def _p(self, *args, **kwargs): + if 'iptables-restore' in args: + print ' '.join(args), kwargs['stdin'] + if 'iptables-save' in args: + return + + in_rules = [ + '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', + '*filter', + ':INPUT ACCEPT [969615:281627771]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [915599:63811649]', + ':nova-block-ipv4 - [0:0]', + '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' + ',ESTABLISHED -j ACCEPT ', + '-A FORWARD -s 192.168.122.0/24 -i 
virbr0 -j ACCEPT ', + '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ', + '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ', + 'COMMIT', + '# Completed on Mon Dec 6 11:54:13 2010' + ] + + def test_static_filters(self): + self.fw.execute = self._p + instance_ref = db.instance_create(self.context, + {'user_id': 'fake', + 'project_id': 'fake'}) + ip = '10.11.12.13' + + network_ref = db.project_get_network(self.context, + 'fake') + + fixed_ip = {'address': ip, + 'network_id': network_ref['id']} + + admin_ctxt = context.get_admin_context() + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': instance_ref['id']}) + + secgroup = db.security_group_create(admin_ctxt, + {'user_id': 'fake', + 'project_id': 'fake', + 'name': 'testgroup', + 'description': 'test group'}) + + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'tcp', + 'from_port': 80, + 'to_port': 81, + 'cidr': '192.168.10.0/24'}) + + db.instance_add_security_group(admin_ctxt, instance_ref['id'], + secgroup['id']) + instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) + + self.fw.add_instance(instance_ref) + + out_rules = self.fw.modify_rules(self.in_rules) + + in_rules = filter(lambda l: not l.startswith('#'), self.in_rules) + for rule in in_rules: + if not 'nova' in rule: + self.assertTrue(rule in out_rules, + 'Rule went missing: %s' % rule) + + print '\n'.join(out_rules) + + +class NWFilterTestCase(test.TestCase): def setUp(self): super(NWFilterTestCase, self).setUp() @@ -224,7 +315,8 @@ class NWFilterTestCase(test.TestCase): self.fake_libvirt_connection = Mock() - self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection) + self.fw = libvirt_conn.NWFilterFirewall( + lambda: self.fake_libvirt_connection) def tearDown(self): self.manager.delete_project(self.project) @@ -337,7 +429,7 @@ class NWFilterTestCase(test.TestCase): self.security_group.id) instance = db.instance_get(self.context, inst_id) - self.fw.setup_base_nwfilters() - self.fw.setup_nwfilters_for_instance(instance) + self.fw.setup_basic_filtering(instance) + self.fw.prepare_instance_filter(instance) _ensure_all_called() self.teardown_security_group() diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index b13455fb07e6..b40ca004b6c1 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -19,23 +19,23 @@ Tests for Volume Code. 
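The test_static_filters case above encodes the contract of IptablesFirewallDriver.modify_rules: every pre-existing non-nova rule in the iptables-save dump must survive the rewrite. A minimal sketch of a merge with that property (names and rule placement are illustrative; the real logic lives in nova/virt/libvirt_conn.py):

    def merge_rules(current_lines, nova_rules):
        """Keep every non-nova line from an iptables-save dump and splice
        the current nova rules in just before COMMIT."""
        out = []
        for line in current_lines:
            if 'nova' in line and not line.startswith(':'):
                continue              # drop stale nova rules, keep chain decls
            if line == 'COMMIT':
                out.extend(nova_rules)
            out.append(line)
        return out

    # e.g. merge_rules(in_rules,
    #                  ['-A nova-block-ipv4 -s 192.168.10.0/24 -p tcp '
    #                   '--dport 80:81 -j ACCEPT'])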
""" -import logging from nova import context from nova import exception from nova import db from nova import flags +from nova import log as logging from nova import test from nova import utils FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.volume') class VolumeTestCase(test.TestCase): """Test Case for volumes.""" def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) super(VolumeTestCase, self).setUp() self.compute = utils.import_object(FLAGS.compute_manager) self.flags(connection_type='fake') @@ -159,7 +159,7 @@ class VolumeTestCase(test.TestCase): volume_id) self.assert_(iscsi_target not in targets) targets.append(iscsi_target) - logging.debug("Target %s allocated", iscsi_target) + LOG.debug(_("Target %s allocated"), iscsi_target) total_slots = FLAGS.iscsi_num_targets for _index in xrange(total_slots): volume_id = self._create_volume() diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index c95a53af3c65..ec9462adafe7 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -79,8 +79,8 @@ class XenAPIVolumeTestCase(test.TestCase): helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() - info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = 'SR-%s' % vol['ec2_id'] + info = helper.parse_volume_info(vol['id'], '/dev/sdc') + label = 'SR-%s' % vol['id'] description = 'Test-SR' sr_ref = helper.create_iscsi_storage(session, info, label, description) srs = xenapi_fake.get_all('SR') @@ -97,7 +97,7 @@ class XenAPIVolumeTestCase(test.TestCase): # oops, wrong mount point! self.assertRaises(volume_utils.StorageError, helper.parse_volume_info, - vol['ec2_id'], + vol['id'], '/dev/sd') db.volume_destroy(context.get_admin_context(), vol['id']) @@ -108,8 +108,7 @@ class XenAPIVolumeTestCase(test.TestCase): volume = self._create_volume() instance = db.instance_create(self.values) xenapi_fake.create_vm(instance.name, 'Running') - result = conn.attach_volume(instance.name, volume['ec2_id'], - '/dev/sdc') + result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc') def check(): # check that the VM has a VBD attached to it @@ -134,7 +133,7 @@ class XenAPIVolumeTestCase(test.TestCase): self.assertRaises(Exception, conn.attach_volume, instance.name, - volume['ec2_id'], + volume['id'], '/dev/sdc') def tearDown(self): @@ -250,15 +249,16 @@ class XenAPIVMTestCase(test.TestCase): def _create_instance(self): """Creates and spawns a test instance""" - values = {'name': 1, 'id': 1, - 'project_id': self.project.id, - 'user_id': self.user.id, - 'image_id': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'instance_type': 'm1.large', - 'mac_address': 'aa:bb:cc:dd:ee:ff' - } + values = { + 'name': 1, + 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff'} instance = db.instance_create(values) self.conn.spawn(instance) return instance diff --git a/nova/twistd.py b/nova/twistd.py index 29be9c4e1457..5562719997fa 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -22,7 +22,6 @@ manage pid files and support syslogging. 
""" import gflags -import logging import os import signal import sys @@ -34,6 +33,7 @@ from twisted.python import runtime from twisted.python import usage from nova import flags +from nova import log as logging if runtime.platformType == "win32": @@ -234,22 +234,12 @@ def serve(filename): OptionsClass = WrapTwistedOptions(TwistdServerOptions) options = OptionsClass() argv = options.parseOptions() - logging.getLogger('amqplib').setLevel(logging.WARN) FLAGS.python = filename FLAGS.no_save = True if not FLAGS.pidfile: FLAGS.pidfile = '%s.pid' % name elif FLAGS.pidfile.endswith('twistd.pid'): FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) - # NOTE(vish): if we're running nodaemon, redirect the log to stdout - if FLAGS.nodaemon and not FLAGS.logfile: - FLAGS.logfile = "-" - if not FLAGS.logfile: - FLAGS.logfile = '%s.log' % name - elif FLAGS.logfile.endswith('twistd.log'): - FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name) - if FLAGS.logdir: - FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile) if not FLAGS.prefix: FLAGS.prefix = name elif FLAGS.prefix.endswith('twisted'): @@ -270,19 +260,10 @@ def serve(filename): print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) - formatter = logging.Formatter( - '(%(name)s): %(levelname)s %(message)s') - handler = logging.StreamHandler(log.StdioOnnaStick()) - handler.setFormatter(formatter) - logging.getLogger().addHandler(handler) - - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - else: - logging.getLogger().setLevel(logging.WARNING) - + logging.basicConfig() logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) + logging.audit(_("Starting %s"), name) twistd.runApp(options) diff --git a/nova/utils.py b/nova/utils.py index 8cbf59b2dc9d..21b12aaa98aa 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -22,7 +22,6 @@ System-level utilities and helper functions. 
import datetime import inspect -import logging import os import random import subprocess @@ -37,8 +36,10 @@ from eventlet import greenthread from nova import exception from nova.exception import ProcessExecutionError +from nova import log as logging +LOG = logging.getLogger("nova.utils") TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" @@ -109,7 +110,7 @@ def vpn_ping(address, port, timeout=0.05, session_id=None): def fetchfile(url, target): - logging.debug(_("Fetching %s") % url) + LOG.debug(_("Fetching %s") % url) # c = pycurl.Curl() # fp = open(target, "wb") # c.setopt(c.URL, url) @@ -121,7 +122,7 @@ def fetchfile(url, target): def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): - logging.debug(_("Running cmd (subprocess): %s"), cmd) + LOG.debug(_("Running cmd (subprocess): %s"), cmd) env = os.environ.copy() if addl_env: env.update(addl_env) @@ -134,7 +135,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): result = obj.communicate() obj.stdin.close() if obj.returncode: - logging.debug(_("Result was %s") % (obj.returncode)) + LOG.debug(_("Result was %s") % (obj.returncode)) if check_exit_code and obj.returncode != 0: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, @@ -172,12 +173,12 @@ def default_flagfile(filename='nova.conf'): def debug(arg): - logging.debug('debug in callback: %s', arg) + LOG.debug(_('debug in callback: %s'), arg) return arg def runthis(prompt, cmd, check_exit_code=True): - logging.debug(_("Running %s") % (cmd)) + LOG.debug(_("Running %s"), (cmd)) rv, err = execute(cmd, check_exit_code=check_exit_code) @@ -208,7 +209,7 @@ def get_my_ip(): csock.close() return addr except socket.gaierror as ex: - logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex) + LOG.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex) return "127.0.0.1" @@ -301,7 +302,7 @@ class LazyPluggable(object): fromlist = backend self.__backend = __import__(name, None, None, fromlist) - logging.info('backend %s', self.__backend) + LOG.debug(_('backend %s'), self.__backend) return self.__backend def __getattr__(self, key): diff --git a/nova/version.py b/nova/version.py new file mode 100644 index 000000000000..7b27acb6a554 --- /dev/null +++ b/nova/version.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
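The new nova/version.py below assembles version strings from NOVA_VERSION, the FINAL flag, and an optional generated nova/vcsversion.py. With the fallback values visible in the module, the helpers evaluate roughly as follows (a usage sketch, not part of the patch):

    from nova import version

    version.canonical_version_string()   # '2011.1'
    version.version_string()             # '2011.1-dev', since FINAL is False
    version.vcs_version_string()         # 'LOCALBRANCH:LOCALREVISION' fallback
    version.version_string_with_vcs()    # '2011.1-LOCALBRANCH:LOCALREVISION'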
+ +try: + from nova.vcsversion import version_info +except ImportError: + version_info = {'branch_nick': u'LOCALBRANCH', + 'revision_id': 'LOCALREVISION', + 'revno': 0} + +NOVA_VERSION = ['2011', '1'] +YEAR, COUNT = NOVA_VERSION + +FINAL = False # This becomes true at Release Candidate time + + +def canonical_version_string(): + return '.'.join([YEAR, COUNT]) + + +def version_string(): + if FINAL: + return canonical_version_string() + else: + return '%s-dev' % (canonical_version_string(),) + + +def vcs_version_string(): + return "%s:%s" % (version_info['branch_nick'], version_info['revision_id']) + + +def version_string_with_vcs(): + return "%s-%s" % (canonical_version_string(), vcs_version_string()) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 61e99944ef38..13181b7303aa 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -19,15 +19,17 @@ """Abstraction of the underlying virtualization API.""" -import logging import sys from nova import flags +from nova import log as logging from nova.virt import fake from nova.virt import libvirt_conn from nova.virt import xenapi_conn +from nova.virt import hyperv +LOG = logging.getLogger("nova.virt.connection") FLAGS = flags.FLAGS @@ -62,10 +64,12 @@ def get_connection(read_only=False): conn = libvirt_conn.get_connection(read_only) elif t == 'xenapi': conn = xenapi_conn.get_connection(read_only) + elif t == 'hyperv': + conn = hyperv.get_connection(read_only) else: raise Exception('Unknown connection type "%s"' % t) if conn is None: - logging.error(_('Failed to open connection to the hypervisor')) + LOG.error(_('Failed to open connection to the hypervisor')) sys.exit(1) return conn diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py new file mode 100644 index 000000000000..d71387ac0f7e --- /dev/null +++ b/nova/virt/hyperv.py @@ -0,0 +1,462 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Cloud.com, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A connection to Hyper-V . +Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V +Hyper-V WMI usage: + http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx +The Hyper-V object model briefly: + The physical computer and its hosted virtual machines are each represented + by the Msvm_ComputerSystem class. + + Each virtual machine is associated with a + Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more + Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting + there is a series of Msvm_ResourceAllocationSettingData (rasd) objects. + The rasd objects describe the settings for each device in a VM. + Together, the vs_gs_data, vmsettings and rasds describe the configuration + of the virtual machine. 
+ + Creating new resources such as disks and nics involves cloning a default + rasd object and appropriately modifying the clone and calling the + AddVirtualSystemResources WMI method + Changing resources such as memory uses the ModifyVirtualSystemResources + WMI method + +Using the Python WMI library: + Tutorial: + http://timgolden.me.uk/python/wmi/tutorial.html + Hyper-V WMI objects can be retrieved simply by using the class name + of the WMI object and optionally specifying a column to filter the + result set. More complex filters can be formed using WQL (sql-like) + queries. + The parameters and return tuples of WMI method calls can be gleaned by + examining the doc string. For example: + >>> vs_man_svc.ModifyVirtualSystemResources.__doc__ + ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[]) + => (Job, ReturnValue)' + When passing setting data (ResourceSettingData) to the WMI method, + an XML representation of the data is passed in using GetText_(1). + Available methods on a service can be determined using methods.keys(): + >>> vs_man_svc.methods.keys() + vmsettings and rasds for a vm can be retrieved using the 'associators' + method with the appropriate return class. + Long running WMI commands generally return a Job (an instance of + Msvm_ConcreteJob) whose state can be polled to determine when it finishes + +""" + +import os +import time + +from nova import exception +from nova import flags +from nova import log as logging +from nova.auth import manager +from nova.compute import power_state +from nova.virt import images + +wmi = None + + +FLAGS = flags.FLAGS + + +LOG = logging.getLogger('nova.virt.hyperv') + + +HYPERV_POWER_STATE = { + 3: power_state.SHUTDOWN, + 2: power_state.RUNNING, + 32768: power_state.PAUSED, +} + + +REQ_POWER_STATE = { + 'Enabled': 2, + 'Disabled': 3, + 'Reboot': 10, + 'Reset': 11, + 'Paused': 32768, + 'Suspended': 32769 +} + + +WMI_JOB_STATUS_STARTED = 4096 +WMI_JOB_STATE_RUNNING = 4 +WMI_JOB_STATE_COMPLETED = 7 + + +def get_connection(_): + global wmi + if wmi is None: + wmi = __import__('wmi') + return HyperVConnection() + + +class HyperVConnection(object): + def __init__(self): + self._conn = wmi.WMI(moniker='//./root/virtualization') + self._cim_conn = wmi.WMI(moniker='//./root/cimv2') + + def init_host(self): + #FIXME(chiradeep): implement this + LOG.debug(_('In init host')) + pass + + def list_instances(self): + """ Return the names of all the instances known to Hyper-V. """ + vms = [v.ElementName \ + for v in self._conn.Msvm_ComputerSystem(['ElementName'])] + return vms + + def spawn(self, instance): + """ Create a new VM and start it.""" + vm = self._lookup(instance.name) + if vm is not None: + raise exception.Duplicate(_('Attempt to create duplicate vm %s') % + instance.name) + + user = manager.AuthManager().get_user(instance['user_id']) + project = manager.AuthManager().get_project(instance['project_id']) + #Fetch the file, assume it is a VHD file.
+ base_vhd_filename = os.path.join(FLAGS.instances_path, + instance.name) + vhdfile = "%s.vhd" % (base_vhd_filename) + images.fetch(instance['image_id'], vhdfile, user, project) + + try: + self._create_vm(instance) + + self._create_disk(instance['name'], vhdfile) + self._create_nic(instance['name'], instance['mac_address']) + + LOG.debug(_('Starting VM %s '), instance.name) + self._set_vm_state(instance['name'], 'Enabled') + LOG.info(_('Started VM %s '), instance.name) + except Exception as exn: + LOG.exception(_('spawn vm failed: %s'), exn) + self.destroy(instance) + + def _create_vm(self, instance): + """Create a VM but don't start it. """ + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + + vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() + vs_gs_data.ElementName = instance['name'] + (job, ret_val) = vs_man_svc.DefineVirtualSystem( + [], None, vs_gs_data.GetText_(1))[1:] + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + else: + success = (ret_val == 0) + + if not success: + raise Exception(_('Failed to create VM %s'), instance.name) + + LOG.debug(_('Created VM %s...'), instance.name) + vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0] + + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + vmsetting = [s for s in vmsettings + if s.SettingType == 3][0] # avoid snapshots + memsetting = vmsetting.associators( + wmi_result_class='Msvm_MemorySettingData')[0] + #No Dynamic Memory, so reservation, limit and quantity are identical. + mem = long(str(instance['memory_mb'])) + memsetting.VirtualQuantity = mem + memsetting.Reservation = mem + memsetting.Limit = mem + + (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vm.path_(), [memsetting.GetText_(1)]) + LOG.debug(_('Set memory for vm %s...'), instance.name) + procsetting = vmsetting.associators( + wmi_result_class='Msvm_ProcessorSettingData')[0] + vcpus = long(instance['vcpus']) + procsetting.VirtualQuantity = vcpus + procsetting.Reservation = vcpus + procsetting.Limit = vcpus + + (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vm.path_(), [procsetting.GetText_(1)]) + LOG.debug(_('Set vcpus for vm %s...'), instance.name) + + def _create_disk(self, vm_name, vhdfile): + """Create a disk and attach it to the vm""" + LOG.debug(_('Creating disk for %s by attaching disk file %s'), + vm_name, vhdfile) + #Find the IDE controller for the vm. + vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) + vm = vms[0] + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators( + wmi_result_class='MSVM_ResourceAllocationSettingData') + ctrller = [r for r in rasds + if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\ + and r.Address == "0"] + #Find the default disk drive object for the vm and clone it. + diskdflt = self._conn.query( + "SELECT * FROM Msvm_ResourceAllocationSettingData \ + WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\ + AND InstanceID LIKE '%Default%'")[0] + diskdrive = self._clone_wmi_obj( + 'Msvm_ResourceAllocationSettingData', diskdflt) + #Set the IDE ctrller as parent. + diskdrive.Parent = ctrller[0].path_() + diskdrive.Address = 0 + #Add the cloned disk drive object to the vm. 
+ new_resources = self._add_virt_resource(diskdrive, vm) + if new_resources is None: + raise Exception(_('Failed to add diskdrive to VM %s'), + vm_name) + diskdrive_path = new_resources[0] + LOG.debug(_('New disk drive path is %s'), diskdrive_path) + #Find the default VHD disk object. + vhddefault = self._conn.query( + "SELECT * FROM Msvm_ResourceAllocationSettingData \ + WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \ + InstanceID LIKE '%Default%' ")[0] + + #Clone the default and point it to the image file. + vhddisk = self._clone_wmi_obj( + 'Msvm_ResourceAllocationSettingData', vhddefault) + #Set the new drive as the parent. + vhddisk.Parent = diskdrive_path + vhddisk.Connection = [vhdfile] + + #Add the new vhd object as a virtual hard disk to the vm. + new_resources = self._add_virt_resource(vhddisk, vm) + if new_resources is None: + raise Exception(_('Failed to add vhd file to VM %s'), + vm_name) + LOG.info(_('Created disk for %s'), vm_name) + + def _create_nic(self, vm_name, mac): + """Create an (emulated) nic and attach it to the vm""" + LOG.debug(_('Creating nic for %s '), vm_name) + #Find the vswitch that is connected to the physical nic. + vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) + extswitch = self._find_external_network() + vm = vms[0] + switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] + #Find the default nic and clone it to create a new nic for the vm. + #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with + #Linux Integration Components installed. + emulatednics_data = self._conn.Msvm_EmulatedEthernetPortSettingData() + default_nic_data = [n for n in emulatednics_data + if n.InstanceID.rfind('Default') > 0] + new_nic_data = self._clone_wmi_obj( + 'Msvm_EmulatedEthernetPortSettingData', + default_nic_data[0]) + #Create a port on the vswitch. + (new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name, + "", extswitch.path_()) + if ret_val != 0: + LOG.error(_('Failed creating a port on the external vswitch')) + raise Exception(_('Failed creating port for %s'), + vm_name) + LOG.debug(_("Created switch port %s on switch %s"), + vm_name, extswitch.path_()) + #Connect the new nic to the new port. + new_nic_data.Connection = [new_port] + new_nic_data.ElementName = vm_name + ' nic' + new_nic_data.Address = ''.join(mac.split(':')) + new_nic_data.StaticMacAddress = 'TRUE' + #Add the new nic to the vm.
+ new_resources = self._add_virt_resource(new_nic_data, vm) + if new_resources is None: + raise Exception(_('Failed to add nic to VM %s'), + vm_name) + LOG.info(_("Created nic for %s "), vm_name) + + def _add_virt_resource(self, res_setting_data, target_vm): + """Add a new resource (disk/nic) to the VM""" + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job, new_resources, ret_val) = vs_man_svc.\ + AddVirtualSystemResources([res_setting_data.GetText_(1)], + target_vm.path_()) + success = True + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + else: + success = (ret_val == 0) + if success: + return new_resources + else: + return None + + #TODO: use the reactor to poll instead of sleep + def _check_job_status(self, jobpath): + """Poll WMI job state for completion""" + #Jobs have a path of the form: + #\\WIN-P5IG7367DAG\root\virtualization:Msvm_ConcreteJob.InstanceID= + #"8A496B9C-AF4D-4E98-BD3C-1128CD85320D" + inst_id = jobpath.split('=')[1].strip('"') + jobs = self._conn.Msvm_ConcreteJob(InstanceID=inst_id) + if len(jobs) == 0: + return False + job = jobs[0] + while job.JobState == WMI_JOB_STATE_RUNNING: + time.sleep(0.1) + job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0] + if job.JobState != WMI_JOB_STATE_COMPLETED: + LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription) + return False + LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description, + job.ElapsedTime) + return True + + def _find_external_network(self): + """Find the vswitch that is connected to the physical nic. + Assumes only one physical nic on the host + """ + #If there are no physical nics connected to networks, return. + bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE') + if len(bound) == 0: + return None + return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\ + .associators(wmi_result_class='Msvm_SwitchLANEndpoint')[0]\ + .associators(wmi_result_class='Msvm_SwitchPort')[0]\ + .associators(wmi_result_class='Msvm_VirtualSwitch')[0] + + def _clone_wmi_obj(self, wmi_class, wmi_obj): + """Clone a WMI object""" + cl = self._conn.__getattr__(wmi_class) # get the class + newinst = cl.new() + #Copy the properties from the original. + for prop in wmi_obj._properties: + newinst.Properties_.Item(prop).Value =\ + wmi_obj.Properties_.Item(prop).Value + return newinst + + def reboot(self, instance): + """Reboot the specified instance.""" + vm = self._lookup(instance.name) + if vm is None: + raise exception.NotFound('instance not present %s' % instance.name) + self._set_vm_state(instance.name, 'Reboot') + + def destroy(self, instance): + """Destroy the VM. Also destroy the associated VHD disk files""" + LOG.debug(_("Got request to destroy vm %s"), instance.name) + vm = self._lookup(instance.name) + if vm is None: + return + vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0] + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + #Stop the VM first. + self._set_vm_state(instance.name, 'Disabled') + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators( + wmi_result_class='MSVM_ResourceAllocationSettingData') + disks = [r for r in rasds \ + if r.ResourceSubType == 'Microsoft Virtual Hard Disk'] + diskfiles = [] + #Collect disk file information before destroying the VM. + for disk in disks: + diskfiles.extend([c for c in disk.Connection]) + #Nuke the VM. Does not destroy disks. 
+ (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + elif ret_val == 0: + success = True + if not success: + raise Exception(_('Failed to destroy vm %s') % instance.name) + #Delete associated vhd disk files. + for disk in diskfiles: + vhdfile = self._cim_conn.CIM_DataFile(Name=disk) + for vf in vhdfile: + vf.Delete() + LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name) + + def get_info(self, instance_id): + """Get information about the VM""" + vm = self._lookup(instance_id) + if vm is None: + raise exception.NotFound('instance not present %s' % instance_id) + vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0] + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + settings_paths = [v.path_() for v in vmsettings] + #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx + summary_info = vs_man_svc.GetSummaryInformation( + [4, 100, 103, 105], settings_paths)[1] + info = summary_info[0] + LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \ + cpu_time=%s"), instance_id, + str(HYPERV_POWER_STATE[info.EnabledState]), + str(info.MemoryUsage), + str(info.NumberOfProcessors), + str(info.UpTime)) + + return {'state': HYPERV_POWER_STATE[info.EnabledState], + 'max_mem': info.MemoryUsage, + 'mem': info.MemoryUsage, + 'num_cpu': info.NumberOfProcessors, + 'cpu_time': info.UpTime} + + def _lookup(self, i): + vms = self._conn.Msvm_ComputerSystem(ElementName=i) + n = len(vms) + if n == 0: + return None + elif n > 1: + raise Exception(_('duplicate name found: %s') % i) + else: + return vms[0].ElementName + + def _set_vm_state(self, vm_name, req_state): + """Set the desired state of the VM""" + vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) + if len(vms) == 0: + return False + (job, ret_val) = vms[0].RequestStateChange(REQ_POWER_STATE[req_state]) + success = False + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + elif ret_val == 0: + success = True + elif ret_val == 32775: + #Invalid state for current operation. Typically means it is + #already in the state requested + success = True + if success: + LOG.info(_("Successfully changed vm state of %s to %s"), vm_name, + req_state) + else: + LOG.error(_("Failed to change vm state of %s to %s"), vm_name, + req_state) + raise Exception(_("Failed to change vm state of %s to %s"), + vm_name, req_state) + + def attach_volume(self, instance_name, device_path, mountpoint): + vm = self._lookup(instance_name) + if vm is None: + raise exception.NotFound('Cannot attach volume to missing %s vm' % + instance_name) + + def detach_volume(self, instance_name, mountpoint): + vm = self._lookup(instance_name) + if vm is None: + raise exception.NotFound('Cannot detach volume from missing %s ' % + instance_name) diff --git a/nova/virt/images.py b/nova/virt/images.py index 1c9b2e093401..ecf0e5efbc48 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -22,10 +22,14 @@ Handling of VM disk images. 
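Every long-running WMI call in the Hyper-V driver above funnels through _check_job_status, which busy-polls an Msvm_ConcreteJob until it leaves the RUNNING state. Stripped of the WMI plumbing, the polling contract looks like this (fetch_job is a stand-in callable for re-querying the job object):

    import time

    WMI_JOB_STATE_RUNNING = 4      # state codes used by the driver above
    WMI_JOB_STATE_COMPLETED = 7


    def wait_for_job(fetch_job, interval=0.1):
        """Poll until the job stops running; True only on clean completion."""
        job = fetch_job()
        while job['JobState'] == WMI_JOB_STATE_RUNNING:
            time.sleep(interval)   # the driver's TODO: poll via the reactor
            job = fetch_job()      # WMI results are snapshots, so re-query
        return job['JobState'] == WMI_JOB_STATE_COMPLETED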
""" import os.path +import shutil +import sys import time +import urllib2 import urlparse from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager from nova.auth import signer @@ -36,6 +40,8 @@ FLAGS = flags.FLAGS flags.DEFINE_bool('use_s3', True, 'whether to get images from s3 or use local copy') +LOG = logging.getLogger('nova.virt.images') + def fetch(image, path, user, project): if FLAGS.use_s3: @@ -45,6 +51,25 @@ def fetch(image, path, user, project): return f(image, path, user, project) +def _fetch_image_no_curl(url, path, headers): + request = urllib2.Request(url) + for (k, v) in headers.iteritems(): + request.add_header(k, v) + + def urlretrieve(urlfile, fpath): + chunk = 1 * 1024 * 1024 + f = open(fpath, "wb") + while 1: + data = urlfile.read(chunk) + if not data: + break + f.write(data) + + urlopened = urllib2.urlopen(request) + urlretrieve(urlopened, path) + LOG.debug(_("Finished retreving %s -- placed in %s"), url, path) + + def _fetch_s3_image(image, path, user, project): url = image_url(image) @@ -61,18 +86,24 @@ def _fetch_s3_image(image, path, user, project): url_path) headers['Authorization'] = 'AWS %s:%s' % (access, signature) - cmd = ['/usr/bin/curl', '--fail', '--silent', url] - for (k, v) in headers.iteritems(): - cmd += ['-H', '"%s: %s"' % (k, v)] + if sys.platform.startswith('win'): + return _fetch_image_no_curl(url, path, headers) + else: + cmd = ['/usr/bin/curl', '--fail', '--silent', url] + for (k, v) in headers.iteritems(): + cmd += ['-H', '\'%s: %s\'' % (k, v)] - cmd += ['-o', path] - cmd_out = ' '.join(cmd) - return utils.execute(cmd_out) + cmd += ['-o', path] + cmd_out = ' '.join(cmd) + return utils.execute(cmd_out) def _fetch_local_image(image, path, user, project): - source = _image_path('%s/image' % image) - return utils.execute('cp %s %s' % (source, path)) + source = _image_path(os.path.join(image, 'image')) + if sys.platform.startswith('win'): + return shutil.copy(source, path) + else: + return utils.execute('cp %s %s' % (source, path)) def _image_path(path): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index d83c57741557..dc31d83573fd 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,7 +36,6 @@ Supports KVM, QEMU, UML, and XEN. 
""" -import logging import os import shutil import random @@ -55,6 +54,7 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils #from nova.api import context from nova.auth import manager @@ -67,6 +67,7 @@ libvirt = None libxml2 = None Template = None +LOG = logging.getLogger('nova.virt.libvirt_conn') FLAGS = flags.FLAGS # TODO(vish): These flags should probably go into a shared location @@ -93,6 +94,9 @@ flags.DEFINE_bool('allow_project_net_traffic', flags.DEFINE_string('ajaxterm_portrange', '10000-12000', 'Range of ports that ajaxterm should randomly try to bind') +flags.DEFINE_string('firewall_driver', + 'nova.virt.libvirt_conn.IptablesFirewallDriver', + 'Firewall driver (defaults to iptables)') def get_connection(read_only): @@ -132,16 +136,24 @@ class LibvirtConnection(object): self._wrapped_conn = None self.read_only = read_only - def init_host(self): - NWFilterFirewall(self._conn).setup_base_nwfilters() + self.nwfilter = NWFilterFirewall(self._get_connection) - @property - def _conn(self): + if not FLAGS.firewall_driver: + self.firewall_driver = self.nwfilter + self.nwfilter.handle_security_groups = True + else: + self.firewall_driver = utils.import_object(FLAGS.firewall_driver) + + def init_host(self): + pass + + def _get_connection(self): if not self._wrapped_conn or not self._test_connection(): - logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri) + LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn + _conn = property(_get_connection) def _test_connection(self): try: @@ -150,7 +162,7 @@ class LibvirtConnection(object): except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - logging.debug(_('Connection to libvirt broke')) + LOG.debug(_('Connection to libvirt broke')) return False raise @@ -222,8 +234,8 @@ class LibvirtConnection(object): def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) - logging.info(_('instance %s: deleting instance files %s'), - instance['name'], target) + LOG.info(_('instance %s: deleting instance files %s'), + instance['name'], target) if os.path.exists(target): shutil.rmtree(target) @@ -287,10 +299,10 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: rebooted'), instance['name']) + LOG.debug(_('instance %s: rebooted'), instance['name']) timer.stop() except Exception, exn: - logging.error(_('_wait_for_reboot failed: %s'), exn) + LOG.exception(_('_wait_for_reboot failed: %s'), exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -333,10 +345,10 @@ class LibvirtConnection(object): state = self.get_info(instance['name'])['state'] db.instance_set_state(None, instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: rescued'), instance['name']) + LOG.debug(_('instance %s: rescued'), instance['name']) timer.stop() except Exception, exn: - logging.error(_('_wait_for_rescue failed: %s'), exn) + LOG.exception(_('_wait_for_rescue failed: %s'), exn) db.instance_set_state(None, instance['id'], power_state.SHUTDOWN) @@ -358,10 +370,13 @@ class LibvirtConnection(object): instance['id'], power_state.NOSTATE, 'launching') - 
NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) + + self.nwfilter.setup_basic_filtering(instance) + self.firewall_driver.prepare_instance_filter(instance) self._create_image(instance, xml) self._conn.createXML(xml, 0) - logging.debug(_("instance %s: is running"), instance['name']) + LOG.debug(_("instance %s: is running"), instance['name']) + self.firewall_driver.apply_instance_filter(instance) timer = utils.LoopingCall(f=None) @@ -371,11 +386,11 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: booted'), instance['name']) + LOG.debug(_('instance %s: booted'), instance['name']) timer.stop() except: - logging.exception(_('instance %s: failed to boot'), - instance['name']) + LOG.exception(_('instance %s: failed to boot'), + instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -385,11 +400,11 @@ class LibvirtConnection(object): return timer.start(interval=0.5, now=True) def _flush_xen_console(self, virsh_output): - logging.info('virsh said: %r' % (virsh_output,)) + LOG.info(_('virsh said: %r'), virsh_output) virsh_output = virsh_output[0].strip() if virsh_output.startswith('/dev/'): - logging.info(_('cool, it\'s a device')) + LOG.info(_('cool, it\'s a device')) out, err = utils.execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) return out @@ -397,7 +412,7 @@ class LibvirtConnection(object): return '' def _append_to_file(self, data, fpath): - logging.info(_('data: %r, fpath: %r') % (data, fpath)) + LOG.info(_('data: %r, fpath: %r'), data, fpath) fp = open(fpath, 'a+') fp.write(data) return fpath @@ -405,7 +420,7 @@ class LibvirtConnection(object): def _dump_file(self, fpath): fp = open(fpath, 'r+') contents = fp.read() - logging.info('Contents: %r' % (contents,)) + LOG.info(_('Contents of file %s: %r'), fpath, contents) return contents @exception.wrap_exception @@ -475,7 +490,7 @@ class LibvirtConnection(object): # TODO(termie): these are blocking calls, it would be great # if they weren't. - logging.info(_('instance %s: Creating image'), inst['name']) + LOG.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() @@ -531,10 +546,10 @@ class LibvirtConnection(object): 'dns': network_ref['dns']} if key or net: if key: - logging.info(_('instance %s: injecting key into image %s'), + LOG.info(_('instance %s: injecting key into image %s'), inst['name'], inst.image_id) if net: - logging.info(_('instance %s: injecting net into image %s'), + LOG.info(_('instance %s: injecting net into image %s'), inst['name'], inst.image_id) try: disk.inject_data(basepath('disk-raw'), key, net, @@ -542,9 +557,9 @@ class LibvirtConnection(object): execute=execute) except Exception as e: # This could be a windows image, or a vmdk format disk - logging.warn(_('instance %s: ignoring error injecting data' - ' into image %s (%s)'), - inst['name'], inst.image_id, e) + LOG.warn(_('instance %s: ignoring error injecting data' + ' into image %s (%s)'), + inst['name'], inst.image_id, e) if inst['kernel_id']: if os.path.exists(basepath('disk')): @@ -570,8 +585,10 @@ class LibvirtConnection(object): def to_xml(self, instance, rescue=False): # TODO(termie): cache? 
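# --- Illustrative sketch, not part of the patch ---
# to_xml below builds an xml_info dict and renders the libvirt domain XML
# through a Cheetah template (see the Template(...) call further down in
# this hunk). A minimal standalone version of that pattern; the template
# text and values here are hypothetical:
from Cheetah.Template import Template


def render_domain_xml(template_text, name, memory_kb, vcpus):
    # Values in xml_info become $name, $memory_kb, $vcpus in the template.
    xml_info = {'name': name,
                'memory_kb': memory_kb,
                'vcpus': vcpus}
    return str(Template(template_text, searchList=[xml_info]))

# render_domain_xml('<domain><name>$name</name></domain>',
#                   'instance-00000001', 2097152, 1)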
- logging.debug(_('instance %s: starting toXML method'), - instance['name']) + LOG.debug(_('instance %s: starting toXML method'), instance['name']) + network = db.project_get_network(context.get_admin_context(), + instance['project_id']) + LOG.debug(_('instance %s: starting toXML method'), instance['name']) network = db.network_get_by_instance(context.get_admin_context(), instance['id']) # FIXME(vish): stick this in db @@ -613,7 +630,7 @@ class LibvirtConnection(object): xml_info['disk'] = xml_info['basepath'] + "/disk" xml = str(Template(self.libvirt_xml, searchList=[xml_info])) - logging.debug(_('instance %s: finished toXML method'), + LOG.debug(_('instance %s: finished toXML method'), instance['name']) return xml @@ -734,18 +751,55 @@ class LibvirtConnection(object): domain = self._conn.lookupByName(instance_name) return domain.interfaceStats(interface) - def refresh_security_group(self, security_group_id): - fw = NWFilterFirewall(self._conn) - fw.ensure_security_group_filter(security_group_id) + def refresh_security_group_rules(self, security_group_id): + self.firewall_driver.refresh_security_group_rules(security_group_id) + + def refresh_security_group_members(self, security_group_id): + self.firewall_driver.refresh_security_group_members(security_group_id) -class NWFilterFirewall(object): +class FirewallDriver(object): + def prepare_instance_filter(self, instance): + """Prepare filters for the instance. + + At this point, the instance isn't running yet.""" + raise NotImplementedError() + + def apply_instance_filter(self, instance): + """Apply instance filter. + + Once this method returns, the instance should be firewalled + appropriately. This method should as far as possible be a + no-op. It's vastly preferred to get everything set up in + prepare_instance_filter. + """ + raise NotImplementedError() + + def refresh_security_group_rules(self, security_group_id): + """Refresh security group rules from data store + + Gets called when a rule has been added to or removed from + the security group.""" + raise NotImplementedError() + + def refresh_security_group_members(self, security_group_id): + """Refresh security group members from data store + + Gets called when an instance gets added to or removed from + the security group.""" + raise NotImplementedError() + + +class NWFilterFirewall(FirewallDriver): """ This class implements a network filtering mechanism versatile enough for EC2 style Security Group filtering by leveraging libvirt's nwfilter. First, all instances get a filter ("nova-base-filter") applied. + This filter provides some basic security such as protection against + MAC spoofing, IP spoofing, and ARP spoofing. + This filter drops all incoming ipv4 and ipv6 connections. Outgoing connections are never blocked. @@ -779,38 +833,79 @@ class NWFilterFirewall(object): (*) This sentence brought to you by the redundancy department of redundancy. + """ def __init__(self, get_connection): - self._conn = get_connection + self._libvirt_get_connection = get_connection + self.static_filters_configured = False + self.handle_security_groups = False - nova_base_filter = ''' - 26717364-50cf-42d1-8185-29bf893ab110 - - - - - - - - ''' + def _get_connection(self): + return self._libvirt_get_connection() + _conn = property(_get_connection) - nova_dhcp_filter = ''' - 891e4787-e5c0-d59b-cbd6-41bc3c6b36fc - - - - - - - ''' + def nova_dhcp_filter(self): + """The standard allow-dhcp-server filter is an one, so it uses + ebtables to allow traffic through. 
Without a corresponding rule in + iptables, it'll get blocked anyway.""" + + return ''' + 891e4787-e5c0-d59b-cbd6-41bc3c6b36fc + + + + + + + ''' + + def setup_basic_filtering(self, instance): + """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" + logging.info('called setup_basic_filtering in nwfilter') + + if self.handle_security_groups: + # No point in setting up a filter set that we'll be overriding + # anyway. + return + + logging.info('ensuring static filters') + self._ensure_static_filters() + + instance_filter_name = self._instance_filter_name(instance) + self._define_filter(self._filter_container(instance_filter_name, + ['nova-base'])) + + def _ensure_static_filters(self): + if self.static_filters_configured: + return + + self._define_filter(self._filter_container('nova-base', + ['no-mac-spoofing', + 'no-ip-spoofing', + 'no-arp-spoofing', + 'allow-dhcp-server'])) + self._define_filter(self.nova_base_ipv4_filter) + self._define_filter(self.nova_base_ipv6_filter) + self._define_filter(self.nova_dhcp_filter) + self._define_filter(self.nova_vpn_filter) + if FLAGS.allow_project_net_traffic: + self._define_filter(self.nova_project_filter) + + self.static_filters_configured = True + + def _filter_container(self, name, filters): + xml = '''%s''' % ( + name, + ''.join(["" % (f,) for f in filters])) + return xml nova_vpn_filter = ''' 2086015e-cf03-11df-8c5d-080027c27973 @@ -824,7 +919,7 @@ class NWFilterFirewall(object): retval = "" for protocol in ['tcp', 'udp', 'icmp']: for direction, action, priority in [('out', 'accept', 399), - ('inout', 'drop', 400)]: + ('in', 'drop', 400)]: retval += """ <%s /> """ % (action, direction, @@ -836,7 +931,7 @@ class NWFilterFirewall(object): retval = "" for protocol in ['tcp', 'udp', 'icmp']: for direction, action, priority in [('out', 'accept', 399), - ('inout', 'drop', 400)]: + ('in', 'drop', 400)]: retval += """ <%s-ipv6 /> """ % (action, direction, @@ -860,43 +955,49 @@ class NWFilterFirewall(object): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def setup_base_nwfilters(self): - self._define_filter(self.nova_base_ipv4_filter) - self._define_filter(self.nova_base_ipv6_filter) - self._define_filter(self.nova_dhcp_filter) - self._define_filter(self.nova_base_filter) - self._define_filter(self.nova_vpn_filter) - if FLAGS.allow_project_net_traffic: - self._define_filter(self.nova_project_filter) - - def setup_nwfilters_for_instance(self, instance): + def prepare_instance_filter(self, instance): """ Creates an NWFilter for the given instance. In the process, it makes sure the filters for the security groups as well as the base filter are all in place. 
""" - nwfilter_xml = ("\n") % instance['name'] - if instance['image_id'] == FLAGS.vpn_image_id: - nwfilter_xml += " \n" + base_filter = 'nova-vpn' else: - nwfilter_xml += " \n" + base_filter = 'nova-base' + + instance_filter_name = self._instance_filter_name(instance) + instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,) + instance_filter_children = [base_filter, instance_secgroup_filter_name] + instance_secgroup_filter_children = ['nova-base-ipv4', + 'nova-base-ipv6', + 'nova-allow-dhcp-server'] + + ctxt = context.get_admin_context() if FLAGS.allow_project_net_traffic: - nwfilter_xml += " \n" + instance_filter_children += ['nova-project'] - for security_group in instance.security_groups: - self.ensure_security_group_filter(security_group['id']) + for security_group in db.security_group_get_by_instance(ctxt, + instance['id']): - nwfilter_xml += (" \n") % security_group['id'] - nwfilter_xml += "" + self.refresh_security_group_rules(security_group['id']) - self._define_filter(nwfilter_xml) + instance_secgroup_filter_children += [('nova-secgroup-%s' % + security_group['id'])] - def ensure_security_group_filter(self, security_group_id): + self._define_filter( + self._filter_container(instance_secgroup_filter_name, + instance_secgroup_filter_children)) + + self._define_filter( + self._filter_container(instance_filter_name, + instance_filter_children)) + + return + + def refresh_security_group_rules(self, security_group_id): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) @@ -914,9 +1015,9 @@ class NWFilterFirewall(object): rule_xml += "dstportstart='%s' dstportend='%s' " % \ (rule.from_port, rule.to_port) elif rule.protocol == 'icmp': - logging.info('rule.protocol: %r, rule.from_port: %r, ' - 'rule.to_port: %r' % - (rule.protocol, rule.from_port, rule.to_port)) + LOG.info('rule.protocol: %r, rule.from_port: %r, ' + 'rule.to_port: %r', rule.protocol, + rule.from_port, rule.to_port) if rule.from_port != -1: rule_xml += "type='%s' " % rule.from_port if rule.to_port != -1: @@ -927,3 +1028,162 @@ class NWFilterFirewall(object): xml = "%s" % \ (security_group_id, rule_xml,) return xml + + def _instance_filter_name(self, instance): + return 'nova-instance-%s' % instance['name'] + + +class IptablesFirewallDriver(FirewallDriver): + def __init__(self, execute=None): + self.execute = execute or utils.execute + self.instances = set() + + def apply_instance_filter(self, instance): + """No-op. Everything is done in prepare_instance_filter""" + pass + + def remove_instance(self, instance): + self.instances.remove(instance) + + def add_instance(self, instance): + self.instances.add(instance) + + def prepare_instance_filter(self, instance): + self.add_instance(instance) + self.apply_ruleset() + + def apply_ruleset(self): + current_filter, _ = self.execute('sudo iptables-save -t filter') + current_lines = current_filter.split('\n') + new_filter = self.modify_rules(current_lines) + self.execute('sudo iptables-restore', + process_input='\n'.join(new_filter)) + + def modify_rules(self, current_lines): + ctxt = context.get_admin_context() + # Remove any trace of nova rules. 
+        new_filter = filter(lambda l: 'nova-' not in l, current_lines)
+
+        seen_chains = False
+        for rules_index in range(len(new_filter)):
+            if not seen_chains:
+                if new_filter[rules_index].startswith(':'):
+                    seen_chains = True
+            else:
+                if not new_filter[rules_index].startswith(':'):
+                    break
+
+        our_chains = [':nova-ipv4-fallback - [0:0]']
+        our_rules = ['-A nova-ipv4-fallback -j DROP']
+
+        our_chains += [':nova-local - [0:0]']
+        our_rules += ['-A FORWARD -j nova-local']
+
+        security_groups = set()
+        # Add our chains
+        # First, we add instance chains and rules
+        for instance in self.instances:
+            chain_name = self._instance_chain_name(instance)
+            ip_address = self._ip_for_instance(instance)
+
+            our_chains += [':%s - [0:0]' % chain_name]
+
+            # Jump to the per-instance chain
+            our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
+                                                         chain_name)]
+
+            # Always drop invalid packets
+            our_rules += ['-A %s -m state --state '
+                          'INVALID -j DROP' % (chain_name,)]
+
+            # Allow established connections
+            our_rules += ['-A %s -m state --state '
+                          'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]
+
+            # Jump to each security group chain in turn
+            for security_group in \
+                    db.security_group_get_by_instance(ctxt,
+                                                      instance['id']):
+                security_groups.add(security_group)
+
+                sg_chain_name = \
+                    self._security_group_chain_name(security_group)
+
+                our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
+
+            # Allow DHCP responses
+            dhcp_server = self._dhcp_server_for_instance(instance)
+            our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
+                          '-j ACCEPT' % (chain_name, dhcp_server)]
+
+            # If nothing matches, jump to the fallback chain
+            our_rules += ['-A %s -j nova-ipv4-fallback' % (chain_name,)]
+
+        # then, security group chains and rules
+        for security_group in security_groups:
+            chain_name = self._security_group_chain_name(security_group)
+            our_chains += [':%s - [0:0]' % chain_name]
+
+            rules = \
+                db.security_group_rule_get_by_security_group(ctxt,
+                                                      security_group['id'])
+
+            for rule in rules:
+                LOG.info('%r', rule)
+                args = ['-A', chain_name, '-p', rule.protocol]
+
+                if rule.cidr:
+                    args += ['-s', rule.cidr]
+                else:
+                    # Eventually, a mechanism to grant access for security
+                    # groups will turn up here. It'll use ipsets.
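# --- Illustrative sketch, not part of the patch ---
# How an ICMP security-group rule maps onto iptables match arguments in
# the protocol handling just below; -1 means "any type/code". Note that
# the iptables flag is --icmp-type (hyphen, not underscore).
def icmp_match_args(from_port, to_port):
    """Return the iptables args for an ICMP rule's type/code pair."""
    if from_port == -1:
        return []          # any ICMP type: no extra match needed
    arg = str(from_port)
    if to_port != -1:
        arg += '/%s' % to_port
    return ['-m', 'icmp', '--icmp-type', arg]

# icmp_match_args(8, -1)  -> ['-m', 'icmp', '--icmp-type', '8']
# icmp_match_args(-1, -1) -> []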
+                    continue
+
+                if rule.protocol in ['udp', 'tcp']:
+                    if rule.from_port == rule.to_port:
+                        args += ['--dport', '%s' % (rule.from_port,)]
+                    else:
+                        args += ['-m', 'multiport',
+                                 '--dports', '%s:%s' % (rule.from_port,
+                                                        rule.to_port)]
+                elif rule.protocol == 'icmp':
+                    icmp_type = rule.from_port
+                    icmp_code = rule.to_port
+
+                    if icmp_type == -1:
+                        icmp_type_arg = None
+                    else:
+                        icmp_type_arg = '%s' % icmp_type
+                        if icmp_code != -1:
+                            icmp_type_arg += '/%s' % icmp_code
+
+                    if icmp_type_arg:
+                        args += ['-m', 'icmp', '--icmp-type', icmp_type_arg]
+
+                args += ['-j', 'ACCEPT']
+                our_rules += [' '.join(args)]
+
+        new_filter[rules_index:rules_index] = our_rules
+        new_filter[rules_index:rules_index] = our_chains
+        LOG.info('new_filter: %s', '\n'.join(new_filter))
+        return new_filter
+
+    def refresh_security_group_members(self, security_group):
+        pass
+
+    def refresh_security_group_rules(self, security_group):
+        self.apply_ruleset()
+
+    def _security_group_chain_name(self, security_group):
+        return 'nova-sg-%s' % (security_group['id'],)
+
+    def _instance_chain_name(self, instance):
+        return 'nova-inst-%s' % (instance['id'],)
+
+    def _ip_for_instance(self, instance):
+        return db.instance_get_fixed_address(context.get_admin_context(),
+                                             instance['id'])
+
+    def _dhcp_server_for_instance(self, instance):
+        network = db.project_get_network(context.get_admin_context(),
+                                         instance['project_id'])
+        return network['gateway']
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index aa4026f9730a..96d8f5fc8c5e 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -52,12 +52,12 @@ A fake XenAPI SDK.
 import datetime
-import logging
 import uuid
 from pprint import pformat
 from nova import exception
+from nova import log as logging
 _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
@@ -65,9 +65,11 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
 _db_content = {}
+LOG = logging.getLogger("nova.virt.xenapi.fake")
+
 def log_db_contents(msg=None):
-    logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+    LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
 def reset():
@@ -242,9 +244,9 @@ class SessionBase(object):
         full_params = (self._session,) + params
         meth = getattr(self, methodname, None)
         if meth is None:
-            logging.warn('Raising NotImplemented')
+            LOG.debug(_('Raising NotImplemented'))
             raise NotImplementedError(
-                'xenapi.fake does not have an implementation for %s' %
+                _('xenapi.fake does not have an implementation for %s') %
                 methodname)
         return meth(*full_params)
@@ -278,12 +280,12 @@ class SessionBase(object):
         if impl is not None:
             def callit(*params):
-                logging.warn('Calling %s %s', name, impl)
+                LOG.debug(_('Calling %s %s'), name, impl)
                 self._check_session(params)
                 return impl(*params)
             return callit
         if self._is_gettersetter(name, True):
-            logging.warn('Calling getter %s', name)
+            LOG.debug(_('Calling getter %s'), name)
             return lambda *params: self._getter(name, params)
         elif self._is_create(name):
             return lambda *params: self._create(name, params)
@@ -333,10 +335,10 @@ class SessionBase(object):
                 field in _db_content[cls][ref]):
                 return _db_content[cls][ref][field]
-        logging.error('Raising NotImplemented')
+        LOG.debug(_('Raising NotImplemented'))
         raise NotImplementedError(
-            'xenapi.fake does not have an implementation for %s or it has '
-            'been called with the wrong number of arguments' % name)
+            _('xenapi.fake does not have an implementation for %s or it has '
+              'been called with the wrong number of arguments') % name)

     def
_setter(self, name, params): self._check_session(params) @@ -351,7 +353,7 @@ class SessionBase(object): field in _db_content[cls][ref]): _db_content[cls][ref][field] = val - logging.warn('Raising NotImplemented') + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( 'xenapi.fake does not have an implementation for %s or it has ' 'been called with the wrong number of arguments or the database ' @@ -399,7 +401,7 @@ class SessionBase(object): self._session not in _db_content['session']): raise Failure(['HANDLE_INVALID', 'session', self._session]) if len(params) == 0 or params[0] != self._session: - logging.warn('Raising NotImplemented') + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError('Call to XenAPI without using .xenapi') def _check_arg_count(self, params, expected): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9d1b51848973..1e9448a269ae 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,7 +19,6 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ -import logging import pickle import urllib from xml.dom import minidom @@ -27,6 +26,7 @@ from xml.dom import minidom from eventlet import event from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types @@ -37,6 +37,7 @@ from nova.virt.xenapi.volume_utils import StorageError FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.virt.xenapi.vm_utils") XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -121,9 +122,9 @@ class VMHelper(HelperBase): rec['HVM_boot_params'] = {'order': 'dc'} rec['platform'] = {'acpi': 'true', 'apic': 'true', 'pae': 'true', 'viridian': 'true'} - logging.debug('Created VM %s...', instance.name) + LOG.debug(_('Created VM %s...'), instance.name) vm_ref = session.call_xenapi('VM.create', rec) - logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref) + LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref) return vm_ref @classmethod @@ -143,10 +144,9 @@ class VMHelper(HelperBase): vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] - logging.debug(_('Creating VBD for VM %s, VDI %s ... '), - vm_ref, vdi_ref) + LOG.debug(_('Creating VBD for VM %s, VDI %s ... 
'), vm_ref, vdi_ref) vbd_ref = session.call_xenapi('VBD.create', vbd_rec) - logging.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref, + LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref, vdi_ref) return vbd_ref @@ -161,7 +161,7 @@ class VMHelper(HelperBase): if vbd_rec['userdevice'] == str(number): return vbd except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('VBD not found in instance %s') % vm_ref) @classmethod @@ -170,7 +170,7 @@ class VMHelper(HelperBase): try: vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) if exc.details[0] != 'DEVICE_ALREADY_DETACHED': raise StorageError(_('Unable to unplug VBD %s') % vbd_ref) @@ -183,7 +183,7 @@ class VMHelper(HelperBase): #with Josh Kearney session.wait_for_task(0, task) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to destroy VBD %s') % vbd_ref) @classmethod @@ -199,11 +199,11 @@ class VMHelper(HelperBase): vif_rec['other_config'] = {} vif_rec['qos_algorithm_type'] = '' vif_rec['qos_algorithm_params'] = {} - logging.debug(_('Creating VIF for VM %s, network %s.'), vm_ref, - network_ref) + LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref, + network_ref) vif_ref = session.call_xenapi('VIF.create', vif_rec) - logging.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref, - vm_ref, network_ref) + LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref, + vm_ref, network_ref) return vif_ref @classmethod @@ -213,8 +213,7 @@ class VMHelper(HelperBase): """ #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added - logging.debug(_("Snapshotting VM %s with label '%s'..."), - vm_ref, label) + LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label) vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) vm_vdi_uuid = vm_vdi_rec["uuid"] @@ -227,8 +226,8 @@ class VMHelper(HelperBase): template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] - logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, - vm_ref) + LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, + vm_ref) parent_uuid = wait_for_vhd_coalesce( session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) @@ -241,8 +240,7 @@ class VMHelper(HelperBase): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. 
""" - logging.debug(_("Asking xapi to upload %s as '%s'"), - vdi_uuids, image_name) + LOG.debug(_("Asking xapi to upload %s as '%s'"), vdi_uuids, image_name) params = {'vdi_uuids': vdi_uuids, 'image_name': image_name, @@ -260,7 +258,7 @@ class VMHelper(HelperBase): """ url = images.image_url(image) access = AuthManager().get_access_key(user, project) - logging.debug("Asking xapi to fetch %s as %s", url, access) + LOG.debug(_("Asking xapi to fetch %s as %s"), url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url @@ -278,7 +276,7 @@ class VMHelper(HelperBase): @classmethod def lookup_image(cls, session, vdi_ref): - logging.debug("Looking up vdi %s for PV kernel", vdi_ref) + LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) fn = "is_vdi_pv" args = {} args['vdi-ref'] = vdi_ref @@ -289,7 +287,7 @@ class VMHelper(HelperBase): pv = True elif pv_str.lower() == 'false': pv = False - logging.debug("PV Kernel in VDI:%d", pv) + LOG.debug(_("PV Kernel in VDI:%d"), pv) return pv @classmethod @@ -317,10 +315,9 @@ class VMHelper(HelperBase): vdi = session.get_xenapi().VBD.get_VDI(vbd) # Test valid VDI record = session.get_xenapi().VDI.get_record(vdi) - logging.debug(_('VDI %s is still available'), - record['uuid']) + LOG.debug(_('VDI %s is still available'), record['uuid']) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) else: vdis.append(vdi) if len(vdis) > 0: @@ -331,10 +328,10 @@ class VMHelper(HelperBase): @classmethod def compile_info(cls, record): """Fill record with VM status information""" - logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"), - record['power_state']) - logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"), - XENAPI_POWER_STATE[record['power_state']]) + LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"), + record['power_state']) + LOG.info(_("(VM_UTILS) xenapi power_state -> |%s|"), + XENAPI_POWER_STATE[record['power_state']]) return {'state': XENAPI_POWER_STATE[record['power_state']], 'max_mem': long(record['memory_static_max']) >> 10, 'mem': long(record['memory_dynamic_max']) >> 10, @@ -388,11 +385,9 @@ def get_vhd_parent(session, vdi_rec): """ if 'vhd-parent' in vdi_rec['sm_config']: parent_uuid = vdi_rec['sm_config']['vhd-parent'] - #NOTE(sirp): changed xenapi -> get_xenapi() parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid) parent_rec = session.get_xenapi().VDI.get_record(parent_ref) - #NOTE(sirp): changed log -> logging - logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref) + LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref) return parent_ref, parent_rec else: return None @@ -409,7 +404,7 @@ def get_vhd_parent_uuid(session, vdi_ref): def scan_sr(session, instance_id, sr_ref): - logging.debug(_("Re-scanning SR %s"), sr_ref) + LOG.debug(_("Re-scanning SR %s"), sr_ref) task = session.call_xenapi('Async.SR.scan', sr_ref) session.wait_for_task(instance_id, task) @@ -433,10 +428,9 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, scan_sr(session, instance_id, sr_ref) parent_uuid = get_vhd_parent_uuid(session, vdi_ref) if original_parent_uuid and (parent_uuid != original_parent_uuid): - logging.debug( - _("Parent %s doesn't match original parent %s, " - "waiting for coalesce..."), - parent_uuid, original_parent_uuid) + LOG.debug(_("Parent %s doesn't match original parent %s, " + "waiting for coalesce..."), parent_uuid, + original_parent_uuid) else: done.send(parent_uuid) diff --git a/nova/virt/xenapi/vmops.py 
b/nova/virt/xenapi/vmops.py index 146b9e6df091..7e3585991a63 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -18,10 +19,11 @@ Management class for VM-related functions (spawn, reboot, etc). """ -import logging +import json from nova import db from nova import context +from nova import log as logging from nova import exception from nova import utils @@ -31,12 +33,14 @@ from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.vm_utils import ImageType +XenAPI = None +LOG = logging.getLogger("nova.virt.xenapi.vmops") + class VMOps(object): """ Management class for VM-related tasks """ - def __init__(self, session): self.XenAPI = session.get_imported_xenapi() self._session = session @@ -92,10 +96,9 @@ class VMOps(object): if network_ref: VMHelper.create_vif(self._session, vm_ref, network_ref, instance.mac_address) - logging.debug(_('Starting VM %s...'), vm_ref) + LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) - logging.info(_('Spawning VM %s created %s.'), instance.name, - vm_ref) + LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref) # NOTE(armando): Do we really need to do this in virt? timer = utils.LoopingCall(f=None) @@ -106,12 +109,12 @@ class VMOps(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('Instance %s: booted'), instance['name']) + LOG.debug(_('Instance %s: booted'), instance['name']) timer.stop() except Exception, exc: - logging.warn(exc) - logging.exception(_('instance %s: failed to boot'), - instance['name']) + LOG.warn(exc) + LOG.exception(_('instance %s: failed to boot'), + instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -120,6 +123,20 @@ class VMOps(object): timer.f = _wait_for_boot return timer.start(interval=0.5, now=True) + def _get_vm_opaque_ref(self, instance_or_vm): + """Refactored out the common code of many methods that receive either + a vm name or a vm instance, and want a vm instance in return. 
+ """ + try: + instance_name = instance_or_vm.name + vm = VMHelper.lookup(self._session, instance_name) + except AttributeError: + # A vm opaque ref was passed + vm = instance_or_vm + if vm is None: + raise Exception(_('Instance not present %s') % instance_name) + return vm + def snapshot(self, instance, name): """ Create snapshot from a running VM instance @@ -168,11 +185,7 @@ class VMOps(object): def reboot(self, instance): """Reboot VM instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise exception.NotFound(_('instance not' - ' found %s') % instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.clean_reboot', vm) self._session.wait_for_task(instance.id, task) @@ -194,7 +207,7 @@ class VMOps(object): task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) # Disk clean-up if vdis: @@ -203,39 +216,31 @@ class VMOps(object): task = self._session.call_xenapi('Async.VDI.destroy', vdi) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) # VM Destroy try: task = self._session.call_xenapi('Async.VM.destroy', vm) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) def _wait_with_callback(self, instance_id, task, callback): ret = None try: ret = self._session.wait_for_task(instance_id, task) - except XenAPI.Failure, exc: - logging.warn(exc) + except self.XenAPI.Failure, exc: + LOG.exception(exc) callback(ret) def pause(self, instance, callback): """Pause VM instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise exception.NotFound(_('Instance not' - ' found %s') % instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.pause', vm) self._wait_with_callback(instance.id, task, callback) def unpause(self, instance, callback): """Unpause VM instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise exception.NotFound(_('Instance not' - ' found %s') % instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.unpause', vm) self._wait_with_callback(instance.id, task, callback) @@ -270,10 +275,7 @@ class VMOps(object): def get_diagnostics(self, instance): """Return data about VM diagnostics""" - vm = VMHelper.lookup(self._session, instance.name) - if vm is None: - raise exception.NotFound(_("Instance not found %s") % - instance.name) + vm = self._get_vm_opaque_ref(instance) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) @@ -286,3 +288,175 @@ class VMOps(object): """Return link to instance's ajax console""" # TODO: implement this! return 'http://fakeajaxconsole/fake_url' + + def list_from_xenstore(self, vm, path): + """Runs the xenstore-ls command to get a listing of all records + from 'path' downward. Returns a dict with the sub-paths as keys, + and the value stored in those paths as values. If nothing is + found at that path, returns None. 
+ """ + ret = self._make_xenstore_call('list_records', vm, path) + return json.loads(ret) + + def read_from_xenstore(self, vm, path): + """Returns the value stored in the xenstore record for the given VM + at the specified location. A XenAPIPlugin.PluginError will be raised + if any error is encountered in the read process. + """ + try: + ret = self._make_xenstore_call('read_record', vm, path, + {'ignore_missing_path': 'True'}) + except self.XenAPI.Failure, e: + return None + ret = json.loads(ret) + if ret == "None": + # Can't marshall None over RPC calls. + return None + return ret + + def write_to_xenstore(self, vm, path, value): + """Writes the passed value to the xenstore record for the given VM + at the specified location. A XenAPIPlugin.PluginError will be raised + if any error is encountered in the write process. + """ + return self._make_xenstore_call('write_record', vm, path, + {'value': json.dumps(value)}) + + def clear_xenstore(self, vm, path): + """Deletes the VM's xenstore record for the specified path. + If there is no such record, the request is ignored. + """ + self._make_xenstore_call('delete_record', vm, path) + + def _make_xenstore_call(self, method, vm, path, addl_args={}): + """Handles calls to the xenstore xenapi plugin.""" + return self._make_plugin_call('xenstore.py', method=method, vm=vm, + path=path, addl_args=addl_args) + + def _make_plugin_call(self, plugin, method, vm, path, addl_args={}): + """Abstracts out the process of calling a method of a xenapi plugin. + Any errors raised by the plugin will in turn raise a RuntimeError here. + """ + vm = self._get_vm_opaque_ref(vm) + rec = self._session.get_xenapi().VM.get_record(vm) + args = {'dom_id': rec['domid'], 'path': path} + args.update(addl_args) + # If the 'testing_mode' attribute is set, add that to the args. + if getattr(self, 'testing_mode', False): + args['testing_mode'] = 'true' + try: + task = self._session.async_call_plugin(plugin, method, args) + ret = self._session.wait_for_task(0, task) + except self.XenAPI.Failure, e: + raise RuntimeError("%s" % e.details[-1]) + return ret + + def add_to_xenstore(self, vm, path, key, value): + """Adds the passed key/value pair to the xenstore record for + the given VM at the specified location. A XenAPIPlugin.PluginError + will be raised if any error is encountered in the write process. + """ + current = self.read_from_xenstore(vm, path) + if not current: + # Nothing at that location + current = {key: value} + else: + current[key] = value + self.write_to_xenstore(vm, path, current) + + def remove_from_xenstore(self, vm, path, key_or_keys): + """Takes either a single key or a list of keys and removes + them from the xenstoreirecord data for the given VM. + If the key doesn't exist, the request is ignored. + """ + current = self.list_from_xenstore(vm, path) + if not current: + return + if isinstance(key_or_keys, basestring): + keys = [key_or_keys] + else: + keys = key_or_keys + keys.sort(lambda x, y: cmp(y.count('/'), x.count('/'))) + for key in keys: + if path: + keypath = "%s/%s" % (path, key) + else: + keypath = key + self._make_xenstore_call('delete_record', vm, keypath) + + ######################################################################## + ###### The following methods interact with the xenstore parameter + ###### record, not the live xenstore. They were created before I + ###### knew the difference, and are left in here in case they prove + ###### to be useful. They all have '_param' added to their method + ###### names to distinguish them. 
(dabo) + ######################################################################## + def read_partial_from_param_xenstore(self, instance_or_vm, key_prefix): + """Returns a dict of all the keys in the xenstore parameter record + for the given instance that begin with the key_prefix. + """ + data = self.read_from_param_xenstore(instance_or_vm) + badkeys = [k for k in data.keys() + if not k.startswith(key_prefix)] + for badkey in badkeys: + del data[badkey] + return data + + def read_from_param_xenstore(self, instance_or_vm, keys=None): + """Returns the xenstore parameter record data for the specified VM + instance as a dict. Accepts an optional key or list of keys; if a + value for 'keys' is passed, the returned dict is filtered to only + return the values for those keys. + """ + vm = self._get_vm_opaque_ref(instance_or_vm) + data = self._session.call_xenapi_request('VM.get_xenstore_data', + (vm, )) + ret = {} + if keys is None: + keys = data.keys() + elif isinstance(keys, basestring): + keys = [keys] + for key in keys: + raw = data.get(key) + if raw: + ret[key] = json.loads(raw) + else: + ret[key] = raw + return ret + + def add_to_param_xenstore(self, instance_or_vm, key, val): + """Takes a key/value pair and adds it to the xenstore parameter + record for the given vm instance. If the key exists in xenstore, + it is overwritten""" + vm = self._get_vm_opaque_ref(instance_or_vm) + self.remove_from_param_xenstore(instance_or_vm, key) + jsonval = json.dumps(val) + self._session.call_xenapi_request('VM.add_to_xenstore_data', + (vm, key, jsonval)) + + def write_to_param_xenstore(self, instance_or_vm, mapping): + """Takes a dict and writes each key/value pair to the xenstore + parameter record for the given vm instance. Any existing data for + those keys is overwritten. + """ + for k, v in mapping.iteritems(): + self.add_to_param_xenstore(instance_or_vm, k, v) + + def remove_from_param_xenstore(self, instance_or_vm, key_or_keys): + """Takes either a single key or a list of keys and removes + them from the xenstore parameter record data for the given VM. + If the key doesn't exist, the request is ignored. 
+ """ + vm = self._get_vm_opaque_ref(instance_or_vm) + if isinstance(key_or_keys, basestring): + keys = [key_or_keys] + else: + keys = key_or_keys + for key in keys: + self._session.call_xenapi_request('VM.remove_from_xenstore_data', + (vm, key)) + + def clear_param_xenstore(self, instance_or_vm): + """Removes all data from the xenstore parameter record for this VM.""" + self.write_to_param_xenstore(instance_or_vm, {}) + ######################################################################## diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 1ca813bcf8f2..0cd15b950f17 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -21,16 +21,17 @@ and storage repositories import re import string -import logging from nova import db from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.virt.xenapi import HelperBase FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.virt.xenapi.volume_utils") class StorageError(Exception): @@ -53,7 +54,7 @@ class VolumeHelper(HelperBase): """ sr_ref = session.get_xenapi().SR.get_by_name_label(label) if len(sr_ref) == 0: - logging.debug('Introducing %s...', label) + LOG.debug(_('Introducing %s...'), label) record = {} if 'chapuser' in info and 'chappassword' in info: record = {'target': info['targetHost'], @@ -70,10 +71,10 @@ class VolumeHelper(HelperBase): session.get_xenapi_host(), record, '0', label, description, 'iscsi', '', False, {}) - logging.debug('Introduced %s as %s.', label, sr_ref) + LOG.debug(_('Introduced %s as %s.'), label, sr_ref) return sr_ref except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to create Storage Repository')) else: return sr_ref[0] @@ -85,32 +86,32 @@ class VolumeHelper(HelperBase): vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref) sr_ref = session.get_xenapi().VDI.get_SR(vdi_ref) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref) return sr_ref @classmethod def destroy_iscsi_storage(cls, session, sr_ref): """Forget the SR whilst preserving the state of the disk""" - logging.debug("Forgetting SR %s ... ", sr_ref) + LOG.debug(_("Forgetting SR %s ... 
"), sr_ref) pbds = [] try: pbds = session.get_xenapi().SR.get_PBDs(sr_ref) except cls.XenAPI.Failure, exc: - logging.warn('Ignoring exception %s when getting PBDs for %s', - exc, sr_ref) + LOG.warn(_('Ignoring exception %s when getting PBDs for %s'), + exc, sr_ref) for pbd in pbds: try: session.get_xenapi().PBD.unplug(pbd) except cls.XenAPI.Failure, exc: - logging.warn('Ignoring exception %s when unplugging PBD %s', - exc, pbd) + LOG.warn(_('Ignoring exception %s when unplugging PBD %s'), + exc, pbd) try: session.get_xenapi().SR.forget(sr_ref) - logging.debug("Forgetting SR %s done.", sr_ref) + LOG.debug(_("Forgetting SR %s done."), sr_ref) except cls.XenAPI.Failure, exc: - logging.warn('Ignoring exception %s when forgetting SR %s', - exc, sr_ref) + LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc, + sr_ref) @classmethod def introduce_vdi(cls, session, sr_ref): @@ -118,12 +119,12 @@ class VolumeHelper(HelperBase): try: vdis = session.get_xenapi().SR.get_VDIs(sr_ref) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref) try: vdi_rec = session.get_xenapi().VDI.get_record(vdis[0]) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to get record' ' of VDI %s on') % vdis[0]) else: @@ -141,7 +142,7 @@ class VolumeHelper(HelperBase): vdi_rec['xenstore_data'], vdi_rec['sm_config']) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to introduce VDI for SR %s') % sr_ref) @@ -165,11 +166,8 @@ class VolumeHelper(HelperBase): target_host = _get_target_host(iscsi_portal) target_port = _get_target_port(iscsi_portal) target_iqn = _get_iqn(iscsi_name, volume_id) - logging.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)', - volume_id, - target_host, - target_port, - target_iqn) + LOG.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)', + volume_id, target_host, target_port, target_iqn) if (device_number < 0) or \ (volume_id is None) or \ (target_host is None) or \ @@ -196,19 +194,23 @@ class VolumeHelper(HelperBase): elif re.match('^[0-9]+$', mountpoint): return string.atoi(mountpoint, 10) else: - logging.warn('Mountpoint cannot be translated: %s', mountpoint) + LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint) return -1 -def _get_volume_id(path): +def _get_volume_id(path_or_id): """Retrieve the volume id from device_path""" + # If we have the ID and not a path, just return it. 
+ if isinstance(path_or_id, int): + return path_or_id # n must contain at least the volume_id # /vol- is for remote volumes # -vol- is for local volumes # see compute/manager->setup_compute_volume - volume_id = path[path.find('/vol-') + 1:] - if volume_id == path: - volume_id = path[path.find('-vol-') + 1:].replace('--', '-') + volume_id = path_or_id[path_or_id.find('/vol-') + 1:] + if volume_id == path_or_id: + volume_id = path_or_id[path_or_id.find('-vol-') + 1:] + volume_id = volume_id.replace('--', '-') return volume_id @@ -253,7 +255,7 @@ def _get_target(volume_id): "sendtargets -p %s" % volume_ref['host']) except exception.ProcessExecutionError, exc: - logging.warn(exc) + LOG.exception(exc) else: targets = r.splitlines() if len(_e) == 0 and len(targets) == 1: diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index fdeb2506ca0a..189f968c62f3 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -17,14 +17,17 @@ """ Management class for Storage-related functions (attach, detach, etc). """ -import logging from nova import exception +from nova import log as logging from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError +LOG = logging.getLogger("nova.virt.xenapi.volumeops") + + class VolumeOps(object): """ Management class for Volume-related tasks @@ -45,8 +48,8 @@ class VolumeOps(object): raise exception.NotFound(_('Instance %s not found') % instance_name) # NOTE: No Resource Pool concept so far - logging.debug(_("Attach_volume: %s, %s, %s"), - instance_name, device_path, mountpoint) + LOG.debug(_("Attach_volume: %s, %s, %s"), + instance_name, device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. 
# But first, retrieve target info, like Host, IQN, LUN and SCSIID vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) @@ -61,7 +64,7 @@ class VolumeOps(object): try: vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to create VDI on SR %s for instance %s') % (sr_ref, @@ -73,7 +76,7 @@ class VolumeOps(object): vol_rec['deviceNumber'], False) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to use SR %s for instance %s') % (sr_ref, @@ -84,13 +87,13 @@ class VolumeOps(object): vbd_ref) self._session.wait_for_task(vol_rec['deviceNumber'], task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to attach volume to instance %s') % instance_name) - logging.info(_('Mountpoint %s attached to instance %s'), - mountpoint, instance_name) + LOG.info(_('Mountpoint %s attached to instance %s'), + mountpoint, instance_name) def detach_volume(self, instance_name, mountpoint): """Detach volume storage to VM instance""" @@ -100,13 +103,13 @@ class VolumeOps(object): raise exception.NotFound(_('Instance %s not found') % instance_name) # Detach VBD from VM - logging.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint) + LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint) device_number = VolumeHelper.mountpoint_to_number(mountpoint) try: vbd_ref = VMHelper.find_vbd_by_number(self._session, vm_ref, device_number) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) raise Exception(_('Unable to locate volume %s') % mountpoint) else: try: @@ -114,13 +117,13 @@ class VolumeOps(object): vbd_ref) VMHelper.unplug_vbd(self._session, vbd_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) raise Exception(_('Unable to detach volume %s') % mountpoint) try: VMHelper.destroy_vbd(self._session, vbd_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) # Forget SR VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - logging.info(_('Mountpoint %s detached from instance %s'), - mountpoint, instance_name) + LOG.info(_('Mountpoint %s detached from instance %s'), + mountpoint, instance_name) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index e1ad04b15e22..fe1e168778d0 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -19,15 +20,15 @@ A connection to XenServer or Xen Cloud Platform. The concurrency model for this class is as follows: -All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator -deferredToThread). They are remote calls, and so may hang for the usual -reasons. They should not be allowed to block the reactor thread. +All XenAPI calls are on a green thread (using eventlet's "tpool" +thread pool). They are remote calls, and so may hang for the usual +reasons. All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async -(using XenAPI.VM.async_start etc). 
These return a task, which can then be -polled for completion. Polling is handled using reactor.callLater. +(using XenAPI.VM.async_start etc). These return a task, which can then be +polled for completion. -This combination of techniques means that we don't block the reactor thread at +This combination of techniques means that we don't block the main thread at all, and at the same time we don't hold lots of threads waiting for long-running operations. @@ -50,7 +51,6 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. :iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack' """ -import logging import sys import xmlrpclib @@ -61,9 +61,14 @@ from nova import context from nova import db from nova import utils from nova import flags +from nova import log as logging from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps + +LOG = logging.getLogger("nova.virt.xenapi") + + FLAGS = flags.FLAGS flags.DEFINE_string('xenapi_connection_url', @@ -81,7 +86,7 @@ flags.DEFINE_string('xenapi_connection_password', flags.DEFINE_float('xenapi_task_poll_interval', 0.5, 'The interval used for polling of remote tasks ' - '(Async.VM.start, etc). Used only if ' + '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', 5.0, @@ -217,6 +222,14 @@ class XenAPISession(object): f = f.__getattr__(m) return tpool.execute(f, *args) + def call_xenapi_request(self, method, *args): + """Some interactions with dom0, such as interacting with xenstore's + param record, require using the xenapi_request method of the session + object. This wraps that call on a background thread. + """ + f = self._session.xenapi_request + return tpool.execute(f, method, *args) + def async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread.""" return tpool.execute(self._unwrap_plugin_exceptions, @@ -226,7 +239,6 @@ class XenAPISession(object): def wait_for_task(self, id, task): """Return the result of the given task. 
The task is polled until it completes.""" - done = event.Event() loop = utils.LoopingCall(self._poll_task, id, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) @@ -239,7 +251,7 @@ class XenAPISession(object): return self.XenAPI.Session(url) def _poll_task(self, id, task, done): - """Poll the given XenAPI task, and fire the given Deferred if we + """Poll the given XenAPI task, and fire the given action if we get a result.""" try: name = self._session.xenapi.task.get_name_label(task) @@ -252,7 +264,7 @@ class XenAPISession(object): return elif status == "success": result = self._session.xenapi.task.get_result(task) - logging.info(_("Task [%s] %s status: success %s") % ( + LOG.info(_("Task [%s] %s status: success %s") % ( name, task, result)) @@ -260,7 +272,7 @@ class XenAPISession(object): else: error_info = self._session.xenapi.task.get_error_info(task) action["error"] = str(error_info) - logging.warn(_("Task [%s] %s status: %s %s") % ( + LOG.warn(_("Task [%s] %s status: %s %s") % ( name, task, status, @@ -268,7 +280,7 @@ class XenAPISession(object): done.send_exception(self.XenAPI.Failure(error_info)) db.instance_action_create(context.get_admin_context(), action) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.warn(exc) done.send_exception(*sys.exc_info()) def _unwrap_plugin_exceptions(self, func, *args, **kwargs): @@ -276,7 +288,7 @@ class XenAPISession(object): try: return func(*args, **kwargs) except self.XenAPI.Failure, exc: - logging.debug(_("Got exception: %s"), exc) + LOG.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): @@ -289,12 +301,12 @@ class XenAPISession(object): else: raise except xmlrpclib.ProtocolError, exc: - logging.debug(_("Got exception: %s"), exc) + LOG.debug(_("Got exception: %s"), exc) raise def _parse_xmlrpc_value(val): - """Parse the given value as if it were an XML-RPC value. This is + """Parse the given value as if it were an XML-RPC value. This is sometimes used as the format for the task.result field.""" if not val: return val diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py index d6e944fc0bdf..56ef9332e37e 100644 --- a/nova/volume/__init__.py +++ b/nova/volume/__init__.py @@ -16,16 +16,4 @@ # License for the specific language governing permissions and limitations # under the License. -""" -:mod:`nova.volume` -- Nova Block Storage -===================================================== - -.. automodule:: nova.volume - :platform: Unix -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith -""" +from nova.volume.api import API diff --git a/nova/volume/api.py b/nova/volume/api.py new file mode 100644 index 000000000000..ce4831cc38b2 --- /dev/null +++ b/nova/volume/api.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
diff --git a/nova/volume/api.py b/nova/volume/api.py
new file mode 100644
index 000000000000..ce4831cc38b2
--- /dev/null
+++ b/nova/volume/api.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to volumes.
+"""
+
+import datetime
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+FLAGS = flags.FLAGS
+flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+
+LOG = logging.getLogger('nova.volume')
+
+
+class API(base.Base):
+    """API for interacting with the volume manager."""
+
+    def create(self, context, size, name, description):
+        if quota.allowed_volumes(context, 1, size) < 1:
+            LOG.warn(_("Quota exceeded for %s, tried to create %sG volume"),
+                     context.project_id, size)
+            raise quota.QuotaError(_("Volume quota exceeded. You cannot "
+                                     "create a volume of size %s") % size)
+
+        options = {
+            'size': size,
+            'user_id': context.user.id,
+            'project_id': context.project_id,
+            'availability_zone': FLAGS.storage_availability_zone,
+            'status': "creating",
+            'attach_status': "detached",
+            'display_name': name,
+            'display_description': description}
+
+        volume = self.db.volume_create(context, options)
+        rpc.cast(context,
+                 FLAGS.scheduler_topic,
+                 {"method": "create_volume",
+                  "args": {"topic": FLAGS.volume_topic,
+                           "volume_id": volume['id']}})
+        return volume
+
+    def delete(self, context, volume_id):
+        volume = self.get(context, volume_id)
+        if volume['status'] != "available":
+            raise exception.ApiError(_("Volume status must be available"))
+        now = datetime.datetime.utcnow()
+        self.db.volume_update(context, volume_id, {'status': 'deleting',
+                                                   'terminated_at': now})
+        host = volume['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.volume_topic, host),
+                 {"method": "delete_volume",
+                  "args": {"volume_id": volume_id}})
+
+    def update(self, context, volume_id, fields):
+        self.db.volume_update(context, volume_id, fields)
+
+    def get(self, context, volume_id):
+        return self.db.volume_get(context, volume_id)
+
+    def get_all(self, context):
+        if context.user.is_admin():
+            return self.db.volume_get_all(context)
+        return self.db.volume_get_all_by_project(context, context.project_id)
+
+    def check_attach(self, context, volume_id):
+        volume = self.get(context, volume_id)
+        # TODO(vish): abstract status checking?
+        if volume['status'] != "available":
+            raise exception.ApiError(_("Volume status must be available"))
+        if volume['attach_status'] == "attached":
+            raise exception.ApiError(_("Volume is already attached"))
+
+    def check_detach(self, context, volume_id):
+        volume = self.get(context, volume_id)
+        # TODO(vish): abstract status checking?
+        if volume['status'] == "available":
+            raise exception.ApiError(_("Volume is already detached"))
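
The new API class is deliberately asynchronous: create() only records intent
and casts to the scheduler, which in turn casts create_volume to a volume
host. A rough usage sketch, assuming an admin context, a running scheduler
and volume service, and that API() can be constructed with its defaults
(names and sizes are illustrative):

    from nova import context
    from nova import volume

    ctxt = context.get_admin_context()
    volume_api = volume.API()

    # Returns immediately with status 'creating'; the volume manager
    # flips the record to 'available' once the export is ready.
    vol = volume_api.create(ctxt, size=1, name='vol1', description='demo')
    assert vol['status'] == 'creating'
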
""" -import logging -import os import time from nova import exception from nova import flags +from nova import log as logging from nova import utils +LOG = logging.getLogger("nova.volume.driver") FLAGS = flags.FLAGS flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') @@ -73,13 +73,15 @@ class VolumeDriver(object): tries = tries + 1 if tries >= FLAGS.num_shell_tries: raise - logging.exception(_("Recovering from a failed execute." - "Try number %s"), tries) + LOG.exception(_("Recovering from a failed execute. " + "Try number %s"), tries) time.sleep(tries ** 2) def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" - if not os.path.isdir("/dev/%s" % FLAGS.volume_group): + out, err = self._execute("sudo vgs --noheadings -o name") + volume_groups = out.split() + if not FLAGS.volume_group in volume_groups: raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) @@ -205,7 +207,7 @@ class FakeAOEDriver(AOEDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug(_("FAKE AOE: %s"), cmd) + LOG.debug(_("FAKE AOE: %s"), cmd) return (None, None) @@ -310,5 +312,5 @@ class FakeISCSIDriver(ISCSIDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug(_("FAKE ISCSI: %s"), cmd) + LOG.debug(_("FAKE ISCSI: %s"), cmd) return (None, None) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 966334c50dda..6348539c50d4 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,17 +42,18 @@ intact. """ -import logging import datetime from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import utils +LOG = logging.getLogger('nova.volume.manager') FLAGS = flags.FLAGS flags.DEFINE_string('storage_availability_zone', 'nova', @@ -81,7 +82,7 @@ class VolumeManager(manager.Manager): self.driver.check_for_setup_error() ctxt = context.get_admin_context() volumes = self.db.volume_get_all_by_host(ctxt, self.host) - logging.debug(_("Re-exporting %s volumes"), len(volumes)) + LOG.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: self.driver.ensure_export(ctxt, volume) @@ -89,7 +90,7 @@ class VolumeManager(manager.Manager): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - logging.info(_("volume %s: creating"), volume_ref['name']) + LOG.info(_("volume %s: creating"), volume_ref['name']) self.db.volume_update(context, volume_id, @@ -98,18 +99,18 @@ class VolumeManager(manager.Manager): # before passing it to the driver. 
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 966334c50dda..6348539c50d4 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -42,17 +42,18 @@
 intact.
 
 """
 
-import logging
 import datetime
 
 
 from nova import context
 from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import manager
 from nova import utils
 
 
+LOG = logging.getLogger('nova.volume.manager')
 FLAGS = flags.FLAGS
 flags.DEFINE_string('storage_availability_zone',
                     'nova',
@@ -81,7 +82,7 @@ class VolumeManager(manager.Manager):
         self.driver.check_for_setup_error()
         ctxt = context.get_admin_context()
         volumes = self.db.volume_get_all_by_host(ctxt, self.host)
-        logging.debug(_("Re-exporting %s volumes"), len(volumes))
+        LOG.debug(_("Re-exporting %s volumes"), len(volumes))
         for volume in volumes:
             self.driver.ensure_export(ctxt, volume)
 
@@ -89,7 +90,7 @@
         """Creates and exports the volume."""
         context = context.elevated()
         volume_ref = self.db.volume_get(context, volume_id)
-        logging.info(_("volume %s: creating"), volume_ref['name'])
+        LOG.info(_("volume %s: creating"), volume_ref['name'])
 
         self.db.volume_update(context,
                               volume_id,
@@ -98,18 +99,18 @@
         # before passing it to the driver.
         volume_ref['host'] = self.host
 
-        logging.debug(_("volume %s: creating lv of size %sG"),
-                      volume_ref['name'], volume_ref['size'])
+        LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'],
+                  volume_ref['size'])
         self.driver.create_volume(volume_ref)
 
-        logging.debug(_("volume %s: creating export"), volume_ref['name'])
+        LOG.debug(_("volume %s: creating export"), volume_ref['name'])
         self.driver.create_export(context, volume_ref)
 
         now = datetime.datetime.utcnow()
         self.db.volume_update(context,
                               volume_ref['id'], {'status': 'available',
                                                  'launched_at': now})
-        logging.debug(_("volume %s: created successfully"), volume_ref['name'])
+        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
         return volume_id
 
     def delete_volume(self, context, volume_id):
@@ -120,12 +121,12 @@
             raise exception.Error(_("Volume is still attached"))
         if volume_ref['host'] != self.host:
             raise exception.Error(_("Volume is not local to this node"))
-        logging.debug(_("volume %s: removing export"), volume_ref['name'])
+        LOG.debug(_("volume %s: removing export"), volume_ref['name'])
         self.driver.remove_export(context, volume_ref)
-        logging.debug(_("volume %s: deleting"), volume_ref['name'])
+        LOG.debug(_("volume %s: deleting"), volume_ref['name'])
         self.driver.delete_volume(volume_ref)
         self.db.volume_destroy(context, volume_id)
-        logging.debug(_("volume %s: deleted successfully"), volume_ref['name'])
+        LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
         return True
 
     def setup_compute_volume(self, context, volume_id):
diff --git a/nova/wsgi.py b/nova/wsgi.py
index b5d6b96c1592..e999f76a3a3c 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -22,7 +22,6 @@
 Utility methods for working with WSGI servers
 """
 
 import json
-import logging
 import sys
 from xml.dom import minidom
@@ -35,18 +34,30 @@
 import webob
 import webob.dec
 import webob.exc
 
+from nova import log as logging
 
-logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
+
+class WritableLogger(object):
+    """A thin wrapper that responds to `write` and logs."""
+
+    def __init__(self, logger, level=logging.DEBUG):
+        self.logger = logger
+        self.level = level
+
+    def write(self, msg):
+        self.logger.log(self.level, msg)
 
 
 class Server(object):
     """Server class to manage multiple WSGI sockets and applications."""
 
     def __init__(self, threads=1000):
+        logging.basicConfig()
         self.pool = eventlet.GreenPool(threads)
 
     def start(self, application, port, host='0.0.0.0', backlog=128):
         """Run a WSGI server with the given application."""
+        logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port)
         socket = eventlet.listen((host, port), backlog=backlog)
         self.pool.spawn_n(self._run, application, socket)
 
@@ -59,7 +70,9 @@
 
     def _run(self, application, socket):
         """Start a WSGI server in a new green thread."""
-        eventlet.wsgi.server(socket, application, custom_pool=self.pool)
+        logger = logging.getLogger('eventlet.wsgi.server')
+        eventlet.wsgi.server(socket, application, custom_pool=self.pool,
+                             log=WritableLogger(logger))
 
 
 class Application(object):
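
eventlet.wsgi.server writes its access log through a file-like object's
write() method, so WritableLogger is simply an adapter from that interface
onto a logger. A minimal sketch using the stdlib logger in place of nova's
wrapper (the message text is illustrative):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = WritableLogger(logging.getLogger('eventlet.wsgi.server'))
    log.write('127.0.0.1 - - "GET / HTTP/1.1" 200')
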
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
index 2d323a0167ba..8e7a829d5a62 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
@@ -45,6 +45,7 @@ class PluginError(Exception):
     def __init__(self, *args):
         Exception.__init__(self, *args)
 
+
 class ArgumentError(PluginError):
     """Raised when required arguments are missing, argument values are
     invalid, or incompatible arguments are given.
@@ -67,6 +68,7 @@ def ignore_failure(func, *args, **kwargs):
 
 ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$')
 
+
 def validate_exists(args, key, default=None):
     """Validates that a string argument to a RPC method call is given, and
     matches the shell-safe regex, with an optional default value in case it
@@ -76,20 +78,24 @@ does not exist.
 
     """
     if key in args:
         if len(args[key]) == 0:
-            raise ArgumentError('Argument %r value %r is too short.' % (key, args[key]))
+            raise ArgumentError('Argument %r value %r is too short.' %
+                                (key, args[key]))
         if not ARGUMENT_PATTERN.match(args[key]):
-            raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key]))
+            raise ArgumentError('Argument %r value %r contains invalid '
+                                'characters.' % (key, args[key]))
         if args[key][0] == '-':
-            raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key]))
+            raise ArgumentError('Argument %r value %r starts with a hyphen.'
+                                % (key, args[key]))
         return args[key]
     elif default is not None:
         return default
     else:
         raise ArgumentError('Argument %s is required.' % key)
 
+
 def validate_bool(args, key, default=None):
-    """Validates that a string argument to a RPC method call is a boolean string,
-    with an optional default value in case it does not exist.
+    """Validates that a string argument to a RPC method call is a boolean
+    string, with an optional default value in case it does not exist.
 
     Returns the python boolean value.
     """
@@ -99,7 +105,9 @@
     elif value.lower() == 'false':
         return False
     else:
-        raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value))
+        raise ArgumentError("Argument %s may not take value %r. "
+                            "Valid values are ['true', 'false']." % (key, value))
+
 
 def exists(args, key):
     """Validates that a freeform string argument to a RPC method call is
     given.
@@ -110,6 +118,7 @@
     else:
         raise ArgumentError('Argument %s is required.'
                             % key)
 
+
 def optional(args, key):
     """If the given key is in args, return the corresponding value, otherwise
     return None"""
@@ -122,13 +131,14 @@ def get_this_host(session):
 
 def get_domain_0(session):
     this_host_ref = get_this_host(session)
-    expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref
+    expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"'
+    expr = expr % this_host_ref
     return session.xenapi.VM.get_all_records_where(expr).keys()[0]
 
 
 def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
     vdi_ref = session.xenapi.VDI.create(
-        { 'name_label': name_label,
+        {'name_label': name_label,
          'name_description': '',
          'SR': sr_ref,
          'virtual_size': str(virtual_size),
@@ -138,7 +148,7 @@
          'xenstore_data': {},
          'other_config': {},
          'sm_config': {},
-         'tags': [] })
+         'tags': []})
     logging.debug('Created VDI %s (%s, %s, %s) on %s.',
                   vdi_ref, name_label, virtual_size, read_only, sr_ref)
     return vdi_ref
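
The validators above give plugin authors a consistent contract for string
arguments arriving over RPC. A few illustrative calls (values are made up):

    args = {'dom_id': '12', 'enabled': 'true'}
    validate_exists(args, 'dom_id')              # -> '12'
    validate_bool(args, 'enabled')               # -> True
    validate_exists(args, 'missing', 'default')  # -> 'default'
    # validate_exists(args, 'missing') would raise ArgumentError
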
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
new file mode 100755
index 000000000000..695bf3448d69
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 OpenStack LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# XenAPI plugin for reading/writing information to xenstore
+#
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+import subprocess
+
+import XenAPIPlugin
+
+import pluginlib_nova as pluginlib
+pluginlib.configure_logging("xenstore")
+
+
+def jsonify(fnc):
+    def wrapper(*args, **kwargs):
+        return json.dumps(fnc(*args, **kwargs))
+    return wrapper
+
+
+@jsonify
+def read_record(self, arg_dict):
+    """Returns the value stored at the given path for the given dom_id.
+    These must be encoded as key/value pairs in arg_dict. You can
+    optionally include a key 'ignore_missing_path'; if this is present
+    and boolean True, attempting to read a non-existent path will return
+    the string 'None' instead of raising an exception.
+    """
+    cmd = "xenstore-read /local/domain/%(dom_id)s/%(path)s" % arg_dict
+    try:
+        return _run_command(cmd).rstrip("\n")
+    except pluginlib.PluginError, e:
+        if arg_dict.get("ignore_missing_path", False):
+            cmd = "xenstore-exists /local/domain/%(dom_id)s/%(path)s; echo $?"
+            cmd = cmd % arg_dict
+            ret = _run_command(cmd).strip()
+            # If the path exists, the cmd should return "0"
+            if ret != "0":
+                # No such path, so ignore the error and return the
+                # string 'None', since None can't be marshalled
+                # over RPC.
+                return "None"
+        # Either we shouldn't ignore path errors, or another
+        # error was hit. Re-raise.
+        raise
+
+
+@jsonify
+def write_record(self, arg_dict):
+    """Writes to xenstore at the specified path. If there is information
+    already stored in that location, it is overwritten. As in read_record,
+    the dom_id and path must be specified in the arg_dict; additionally,
+    you must specify a 'value' key, whose value must be a string. Typically,
+    you can json-ify more complex values and store the json output.
+    """
+    cmd = "xenstore-write /local/domain/%(dom_id)s/%(path)s '%(value)s'"
+    cmd = cmd % arg_dict
+    _run_command(cmd)
+    return arg_dict["value"]
+
+
+@jsonify
+def list_records(self, arg_dict):
+    """Returns all the stored data at or below the given path for the
+    given dom_id. The data is returned as a json-ified dict, with the
+    path as the key and the stored value as the value. If the path
+    doesn't exist, an empty dict is returned.
+    """
+    cmd = "xenstore-ls /local/domain/%(dom_id)s/%(path)s" % arg_dict
+    cmd = cmd.rstrip("/")
+    try:
+        recs = _run_command(cmd)
+    except pluginlib.PluginError, e:
+        if "No such file or directory" in "%s" % e:
+            # Path doesn't exist.
+            return {}
+        raise
+    base_path = arg_dict["path"]
+    paths = _paths_from_ls(recs)
+    ret = {}
+    for path in paths:
+        if base_path:
+            arg_dict["path"] = "%s/%s" % (base_path, path)
+        else:
+            arg_dict["path"] = path
+        rec = read_record(self, arg_dict)
+        try:
+            val = json.loads(rec)
+        except ValueError:
+            val = rec
+        ret[path] = val
+    return ret
+
+
+@jsonify
+def delete_record(self, arg_dict):
+    """Just like it sounds: it removes the record for the specified
+    VM and the specified path from xenstore.
+    """
+    cmd = "xenstore-rm /local/domain/%(dom_id)s/%(path)s" % arg_dict
+    return _run_command(cmd)
+
+
+def _paths_from_ls(recs):
+    """The xenstore-ls command returns a listing that isn't terribly
+    useful. This method cleans that up into a list of the relative
+    paths found in the listing.
+    """
+    last_nm = ""
+    level = 0
+    path = []
+    ret = []
+    for ln in recs.splitlines():
+        nm, val = ln.rstrip().split(" = ")
+        barename = nm.lstrip()
+        this_level = len(nm) - len(barename)
+        if this_level == 0:
+            ret.append(barename)
+            level = 0
+            path = []
+        elif this_level == level:
+            # child of same parent
+            ret.append("%s/%s" % ("/".join(path), barename))
+        elif this_level > level:
+            path.append(last_nm)
+            ret.append("%s/%s" % ("/".join(path), barename))
+            level = this_level
+        elif this_level < level:
+            path = path[:this_level]
+            ret.append("%s/%s" % ("/".join(path), barename))
+            level = this_level
+        last_nm = barename
+    return ret
+
+
+def _run_command(cmd):
+    """Abstracts out the basics of issuing system commands. If the command
+    returns anything in stderr, a PluginError is raised with that information.
+    Otherwise, the output from stdout is returned.
+    """
+    pipe = subprocess.PIPE
+    proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
+                            stderr=pipe, close_fds=True)
+    proc.wait()
+    err = proc.stderr.read()
+    if err:
+        raise pluginlib.PluginError(err)
+    return proc.stdout.read()
+
+
+if __name__ == "__main__":
+    XenAPIPlugin.dispatch(
+            {"read_record": read_record,
+             "write_record": write_record,
+             "list_records": list_records,
+             "delete_record": delete_record})
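
On the compute side these plugin methods are reached through the
XenAPISession helpers added earlier in this change. A hypothetical round
trip (the session object, dom_id, path, and instance id are examples only;
note that all plugin arguments travel as strings):

    task = session.async_call_plugin('xenstore', 'read_record',
                                     {'dom_id': '12',
                                      'path': 'vm-data/hostname',
                                      'ignore_missing_path': 'True'})
    result = session.wait_for_task(instance_id, task)
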
+ """ + pipe = subprocess.PIPE + proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe, + stderr=pipe, close_fds=True) + proc.wait() + err = proc.stderr.read() + if err: + raise pluginlib.PluginError(err) + return proc.stdout.read() + + +if __name__ == "__main__": + XenAPIPlugin.dispatch( + {"read_record": read_record, + "write_record": write_record, + "list_records": list_records, + "delete_record": delete_record}) diff --git a/setup.cfg b/setup.cfg index 14dcb5c8edde..9c0a331e35e2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,3 +8,17 @@ tag_build = tag_date = 0 tag_svn_revision = 0 +[compile_catalog] +directory = locale +domain = nova + +[update_catalog] +domain = nova +output_dir = locale +input_file = locale/nova.pot + +[extract_messages] +keywords = _ l_ lazy_gettext +mapping_file = babel.cfg +output_file = locale/nova.pot + diff --git a/setup.py b/setup.py index 1abf4d9fe964..3608ff805a32 100644 --- a/setup.py +++ b/setup.py @@ -24,6 +24,15 @@ from setuptools.command.sdist import sdist from sphinx.setup_command import BuildDoc from nova.utils import parse_mailmap, str_dict_replace +from nova import version + +if os.path.isdir('.bzr'): + with open("nova/vcsversion.py", 'w') as version_file: + vcs_cmd = subprocess.Popen(["bzr", "version-info", "--python"], + stdout=subprocess.PIPE) + vcsversion = vcs_cmd.communicate()[0] + version_file.write(vcsversion) + class local_BuildDoc(BuildDoc): def run(self): @@ -48,14 +57,25 @@ class local_sdist(sdist): changelog_file.write(str_dict_replace(changelog, mailmap)) sdist.run(self) +nova_cmdclass= { 'sdist': local_sdist, + 'build_sphinx' : local_BuildDoc } + +try: + from babel.messages import frontend as babel + nova_cmdclass['compile_catalog'] = babel.compile_catalog + nova_cmdclass['extract_messages'] = babel.extract_messages + nova_cmdclass['init_catalog'] = babel.init_catalog + nova_cmdclass['update_catalog'] = babel.update_catalog +except: + pass + setup(name='nova', - version='2011.1', + version=version.canonical_version_string(), description='cloud computing fabric controller', author='OpenStack', author_email='nova@lists.launchpad.net', url='http://www.openstack.org/', - cmdclass={ 'sdist': local_sdist, - 'build_sphinx' : local_BuildDoc }, + cmdclass=nova_cmdclass, packages=find_packages(exclude=['bin', 'smoketests']), include_package_data=True, test_suite='nose.collector', @@ -64,9 +84,11 @@ setup(name='nova', 'bin/nova-dhcpbridge', 'bin/nova-import-canonical-imagestore', 'bin/nova-instancemonitor', + 'bin/nova-logspool', 'bin/nova-manage', 'bin/nova-network', 'bin/nova-objectstore', 'bin/nova-scheduler', + 'bin/nova-spoolsentry', 'bin/nova-volume', 'tools/nova-debug']) diff --git a/tools/pip-requires b/tools/pip-requires index e9559521b5db..341043114832 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -22,4 +22,6 @@ mox==0.5.0 greenlet==0.3.1 nose bzr -Twisted>=10.1.0 \ No newline at end of file +Twisted>=10.1.0 +PasteDeploy +paste