merged trunk

Vishvananda Ishaya 2011-01-18 18:29:56 -08:00
commit eb33a6b78b
210 changed files with 14870 additions and 2406 deletions


@ -12,3 +12,4 @@ CA/openssl.cnf
CA/serial*
CA/newcerts/*.pem
CA/private/cakey.pem
nova/vcsversion.py


@ -16,6 +16,8 @@
<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
<justin@fathomdb.com> <justinsb@justinsb-desktop>
<masumotok@nttdata.co.jp> <root@openstack2-api>
<masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
<mordred@inaugust.com> <mordred@hudson>
<paul@openstack.org> <pvoccio@castor.local>
<paul@openstack.org> <paul.voccio@rackspace.com>
@ -30,3 +32,4 @@
<rconradharris@gmail.com> <rick.harris@rackspace.com>
<corywright@gmail.com> <cory.wright@rackspace.com>
<ant@openstack.org> <amesserl@rackspace.com>
<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>

Authors

@ -3,8 +3,9 @@ Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Chris Behrens <cbehrens@codestud.com>
Chiradeep Vittal <chiradeep@cloud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Chris Behrens <cbehrens@codestud.com>
Cory Wright <corywright@gmail.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
@ -13,18 +14,29 @@ Ed Leafe <ed@leafe.com>
Eldar Nugaev <enugaev@griddynamics.com>
Eric Day <eday@oddments.org>
Ewan Mellor <ewan.mellor@citrix.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
Joe Heck <heckj@mac.com>
Joel Moore <joelbm24@gmail.com>
Jonathan Bryce <jbryce@jbryce.com>
Josh Durgin <joshd@hq.newdream.net>
Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
Muneyuki Noguchi <noguchimn@nttdata.co.jp>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail.com> <nova@u4>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>
@ -39,4 +51,3 @@ Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>


@ -12,6 +12,7 @@ include nova/cloudpipe/bootscript.sh
include nova/cloudpipe/client.ovpn.template
include nova/compute/fakevirtinstance.xml
include nova/compute/interfaces.template
include nova/db/sqlalchemy/migrate_repo/migrate.cfg
include nova/virt/interfaces.template
include nova/virt/libvirt*.xml.template
include nova/tests/CA/

README

@ -1,7 +1,7 @@
The Choose Your Own Adventure README for Nova:
You have come across a cloud computing fabric controller. It has identified
itself as "Nova." It is apparent that it maintains compatability with
itself as "Nova." It is apparent that it maintains compatibility with
the popular Amazon EC2 and S3 APIs.
To monitor it from a distance: follow @novacc on twitter
@ -10,7 +10,7 @@ To tame it for use in your own cloud: read http://nova.openstack.org/getting.sta
To study its anatomy: read http://nova.openstack.org/architecture.html
To disect it in detail: visit http://code.launchpad.net/nova
To dissect it in detail: visit http://code.launchpad.net/nova
To taunt it with its weaknesses: use http://bugs.launchpad.net/nova

babel.cfg Normal file

@ -0,0 +1,2 @@
[python: **.py]

bin/nova-ajax-console-proxy Executable file

@ -0,0 +1,137 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ajax Console Proxy Server"""
from eventlet import greenthread
from eventlet.green import urllib2
import exceptions
import gettext
import logging
import os
import sys
import time
import urlparse
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from nova import wsgi
FLAGS = flags.FLAGS
flags.DEFINE_integer('ajax_console_idle_timeout', 300,
'Seconds before idle connection destroyed')
LOG = logging.getLogger('nova.ajax_console_proxy')
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
class AjaxConsoleProxy(object):
tokens = {}
def __call__(self, env, start_response):
try:
req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
env['HTTP_HOST'],
env['PATH_INFO'],
env['QUERY_STRING'])
if 'HTTP_REFERER' in env:
auth_url = env['HTTP_REFERER']
else:
auth_url = req_url
auth_params = urlparse.parse_qs(urlparse.urlparse(auth_url).query)
parsed_url = urlparse.urlparse(req_url)
auth_info = AjaxConsoleProxy.tokens[auth_params['token'][0]]
args = auth_info['args']
auth_info['last_activity'] = time.time()
remote_url = ("http://%s:%s%s?token=%s" % (
str(args['host']),
str(args['port']),
parsed_url.path,
str(args['token'])))
opener = urllib2.urlopen(remote_url, env['wsgi.input'].read())
body = opener.read()
info = opener.info()
start_response("200 OK", info.dict.items())
return body
except (exceptions.KeyError):
if env['PATH_INFO'] != '/favicon.ico':
LOG.audit("Unauthorized request %s, %s"
% (req_url, str(env)))
start_response("401 NOT AUTHORIZED", [])
return "Not Authorized"
except Exception:
start_response("500 ERROR", [])
return "Server Error"
def register_listeners(self):
class Callback:
def __call__(self, data, message):
if data['method'] == 'authorize_ajax_console':
AjaxConsoleProxy.tokens[data['args']['token']] = \
{'args': data['args'], 'last_activity': time.time()}
conn = rpc.Connection.instance(new=True)
consumer = rpc.TopicConsumer(
connection=conn,
topic=FLAGS.ajax_console_proxy_topic)
consumer.register_callback(Callback())
def delete_expired_tokens():
now = time.time()
to_delete = []
for k, v in AjaxConsoleProxy.tokens.items():
if now - v['last_activity'] > FLAGS.ajax_console_idle_timeout:
to_delete.append(k)
for k in to_delete:
del AjaxConsoleProxy.tokens[k]
utils.LoopingCall(consumer.fetch, auto_ack=True,
enable_callbacks=True).start(0.1)
utils.LoopingCall(delete_expired_tokens).start(1)
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
server = wsgi.Server()
acp = AjaxConsoleProxy()
acp.register_listeners()
server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
server.wait()
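The proxy above learns about consoles over RPC and then forwards browser requests that carry a matching token. Below is a hypothetical sketch of that flow, not part of this commit: the message shape simply mirrors the Callback class in the file above, and the host, port and token values are invented.

# Hypothetical sketch (not part of the commit): how a backend might hand a
# console token to the proxy, and what the proxied browser URL looks like.
import time

tokens = {}  # stands in for AjaxConsoleProxy.tokens

def authorize(data):
    # Same check the proxy's RPC callback performs.
    if data['method'] == 'authorize_ajax_console':
        tokens[data['args']['token']] = {'args': data['args'],
                                         'last_activity': time.time()}

# A console backend would publish something like this on the
# ajax_console_proxy topic; the values here are made up.
authorize({'method': 'authorize_ajax_console',
           'args': {'token': 'abc123', 'host': '10.0.0.5', 'port': 8080}})

# The browser then hits the proxy with the token in its query string,
# e.g. http://proxy:8000/?token=abc123, and the proxy forwards the request
# to http://10.0.0.5:8080/?token=abc123 while refreshing last_activity.
print tokens['abc123']['args']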


@ -34,23 +34,53 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import api
from nova import flags
from nova import utils
from nova import log as logging
from nova import wsgi
logging.basicConfig()
LOG = logging.getLogger('nova.api')
LOG.setLevel(logging.DEBUG)
FLAGS = flags.FLAGS
flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host')
flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port')
flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host')
API_ENDPOINTS = ['ec2', 'osapi']
def run_app(paste_config_file):
LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file)
apps = []
for api in API_ENDPOINTS:
config = wsgi.load_paste_configuration(paste_config_file, api)
if config is None:
LOG.debug(_("No paste configuration for app: %s"), api)
continue
LOG.debug(_("App Config: %s\n%r"), api, config)
wsgi.paste_config_to_flags(config, {
"verbose": FLAGS.verbose,
"%s_host" % api: config.get('host', '0.0.0.0'),
"%s_port" % api: getattr(FLAGS, "%s_port" % api)})
LOG.info(_("Running %s API"), api)
app = wsgi.load_paste_app(paste_config_file, api)
apps.append((app, getattr(FLAGS, "%s_port" % api),
getattr(FLAGS, "%s_host" % api)))
if len(apps) == 0:
LOG.error(_("No known API applications configured in %s."),
paste_config_file)
return
# NOTE(todd): redo logging config, verbose could be set in paste config
logging.basicConfig()
server = wsgi.Server()
for app in apps:
server.start(*app)
server.wait()
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
server = wsgi.Server()
server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host)
server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host)
server.wait()
conf = wsgi.paste_config_file('nova-api.conf')
if conf:
run_app(conf)
else:
LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf')


@ -1,109 +0,0 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova API."""
import gettext
import logging
import os
import sys
from paste import deploy
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import flags
from nova import wsgi
LOG = logging.getLogger('nova.api')
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
FLAGS = flags.FLAGS
API_ENDPOINTS = ['ec2', 'openstack']
def load_configuration(paste_config):
"""Load the paste configuration from the config file and return it."""
config = None
# Try each known name to get the global DEFAULTS, which will give ports
for name in API_ENDPOINTS:
try:
config = deploy.appconfig("config:%s" % paste_config, name=name)
except LookupError:
pass
if config:
verbose = config.get('verbose', None)
if verbose:
FLAGS.verbose = int(verbose) == 1
if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
return config
LOG.debug(_("Paste config at %s has no secion for known apis"),
paste_config)
print _("Paste config at %s has no secion for any known apis") % \
paste_config
os.exit(1)
def launch_api(paste_config_file, section, server, port, host):
"""Launch an api server from the specified port and IP."""
LOG.debug(_("Launching %s api on %s:%s"), section, host, port)
app = deploy.loadapp('config:%s' % paste_config_file, name=section)
server.start(app, int(port), host)
def run_app(paste_config_file):
LOG.debug(_("Using paste.deploy config at: %s"), configfile)
config = load_configuration(paste_config_file)
LOG.debug(_("Configuration: %r"), config)
server = wsgi.Server()
ip = config.get('host', '0.0.0.0')
for api in API_ENDPOINTS:
port = config.get("%s_port" % api, None)
if not port:
continue
host = config.get("%s_host" % api, ip)
launch_api(configfile, api, server, port, host)
LOG.debug(_("All api servers launched, now waiting"))
server.wait()
if __name__ == '__main__':
FLAGS(sys.argv)
configfiles = ['/etc/nova/nova-api.conf']
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
configfiles.insert(0,
os.path.join(possible_topdir, 'etc', 'nova-api.conf'))
for configfile in configfiles:
if os.path.exists(configfile):
run_app(configfile)
break
else:
LOG.debug(_("Skipping missing configuration: %s"), configfile)


@ -36,23 +36,20 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import api
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
from nova import wsgi
FLAGS = flags.FLAGS
flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host')
flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port')
flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host')
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.basicConfig()
compute = service.Service.create(binary='nova-compute')
network = service.Service.create(binary='nova-network')
@ -62,7 +59,22 @@ if __name__ == '__main__':
service.serve(compute, network, volume, scheduler)
server = wsgi.Server()
server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host)
server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host)
server.wait()
apps = []
paste_config_file = wsgi.paste_config_file('nova-api.conf')
for api in ['osapi', 'ec2']:
config = wsgi.load_paste_configuration(paste_config_file, api)
if config is None:
continue
wsgi.paste_config_to_flags(config, {
"verbose": FLAGS.verbose,
"%s_host" % api: config.get('host', '0.0.0.0'),
"%s_port" % api: getattr(FLAGS, "%s_port" % api)})
app = wsgi.load_paste_app(paste_config_file, api)
apps.append((app, getattr(FLAGS, "%s_port" % api),
getattr(FLAGS, "%s_host" % api)))
if len(apps) > 0:
logging.basicConfig()
server = wsgi.Server()
for app in apps:
server.start(*app)
server.wait()

bin/nova-console Executable file

@ -0,0 +1,44 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Console Proxy."""
import eventlet
eventlet.monkey_patch()
import gettext
import os
import sys
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import service
from nova import utils
if __name__ == '__main__':
utils.default_flagfile()
service.serve()
service.wait()


@ -22,7 +22,6 @@ Handle lease database updates from DHCP servers.
"""
import gettext
import logging
import os
import sys
@ -39,6 +38,7 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from nova.network import linux_net
@ -49,11 +49,13 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
LOG = logging.getLogger('nova.dhcpbridge')
def add_lease(mac, ip_address, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
logging.debug("leasing ip")
LOG.debug(_("leasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.lease_fixed_ip(context.get_admin_context(),
mac,
@ -68,14 +70,14 @@ def add_lease(mac, ip_address, _hostname, _interface):
def old_lease(mac, ip_address, hostname, interface):
"""Update just as add lease."""
logging.debug("Adopted old lease or got a change of mac/hostname")
LOG.debug(_("Adopted old lease or got a change of mac/hostname"))
add_lease(mac, ip_address, hostname, interface)
def del_lease(mac, ip_address, _hostname, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
logging.debug("releasing ip")
LOG.debug(_("releasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.release_fixed_ip(context.get_admin_context(),
mac,
@ -100,6 +102,7 @@ def main():
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
utils.default_flagfile(flagfile)
argv = FLAGS(sys.argv)
logging.basicConfig()
interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
if int(os.environ.get('TESTING', '0')):
FLAGS.fake_rabbit = True
@ -117,9 +120,9 @@ def main():
mac = argv[2]
ip = argv[3]
hostname = argv[4]
logging.debug("Called %s for mac %s with ip %s and "
"hostname %s on interface %s",
action, mac, ip, hostname, interface)
LOG.debug(_("Called %s for mac %s with ip %s and "
"hostname %s on interface %s"),
action, mac, ip, hostname, interface)
globals()[action + '_lease'](mac, ip, hostname, interface)
else:
print init_leases(interface)

bin/nova-direct-api Executable file

@ -0,0 +1,61 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova Direct API."""
import gettext
import os
import sys
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import flags
from nova import utils
from nova import wsgi
from nova.api import direct
from nova.compute import api as compute_api
FLAGS = flags.FLAGS
flags.DEFINE_integer('direct_port', 8001, 'Direct API port')
flags.DEFINE_string('direct_host', '0.0.0.0', 'Direct API host')
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
direct.register_service('compute', compute_api.ComputeAPI())
direct.register_service('reflect', direct.Reflection())
router = direct.Router()
with_json = direct.JsonParamsMiddleware(router)
with_req = direct.PostParamsMiddleware(with_json)
with_auth = direct.DelegatedAuthMiddleware(with_req)
server = wsgi.Server()
server.start(with_auth, FLAGS.direct_port, host=FLAGS.direct_host)
server.wait()


@ -23,7 +23,6 @@
import gettext
import os
import logging
import sys
from twisted.application import service
@ -37,19 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import log as logging
from nova import utils
from nova import twistd
from nova.compute import monitor
# TODO(todd): shouldn't this be done with flags? And what about verbose?
logging.getLogger('boto').setLevel(logging.WARN)
LOG = logging.getLogger('nova.instancemonitor')
if __name__ == '__main__':
utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
logging.warn('Starting instance monitor')
LOG.warn(_('Starting instance monitor'))
# pylint: disable-msg=C0103
monitor = monitor.InstanceMonitor()

bin/nova-logspool Normal file

@ -0,0 +1,156 @@
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools for working with logs generated by nova components
"""
import json
import os
import re
import sys
class Request(object):
def __init__(self):
self.time = ""
self.host = ""
self.logger = ""
self.message = ""
self.trace = ""
self.env = ""
self.request_id = ""
def add_error_line(self, error_line):
self.time = " ".join(error_line.split(" ")[:3])
self.host = error_line.split(" ")[3]
self.logger = error_line.split("(")[1].split(" ")[0]
self.request_id = error_line.split("[")[1].split(" ")[0]
error_lines = error_line.split("#012")
self.message = self.clean_log_line(error_lines.pop(0))
self.trace = "\n".join([self.clean_trace(l) for l in error_lines])
def add_environment_line(self, env_line):
self.env = self.clean_env_line(env_line)
def clean_log_line(self, line):
"""Remove log format for time, level, etc: split after context"""
return line.split('] ')[-1]
def clean_env_line(self, line):
"""Also has an 'Environment: ' string in the message"""
return re.sub(r'^Environment: ', '', self.clean_log_line(line))
def clean_trace(self, line):
"""trace has a different format, so split on TRACE:"""
return line.split('TRACE: ')[-1]
def to_dict(self):
return {'traceback': self.trace, 'message': self.message,
'host': self.host, 'env': self.env, 'logger': self.logger,
'request_id': self.request_id}
class LogReader(object):
def __init__(self, filename):
self.filename = filename
self._errors = {}
def process(self, spooldir):
with open(self.filename) as f:
line = f.readline()
while len(line) > 0:
parts = line.split(" ")
level = (len(parts) < 6) or parts[5]
if level == 'ERROR':
self.handle_logged_error(line)
elif level == '[-]' and self.last_error:
# twisted stack trace line
clean_line = " ".join(line.split(" ")[6:])
self.last_error.trace = self.last_error.trace + clean_line
else:
self.last_error = None
line = f.readline()
self.update_spool(spooldir)
def handle_logged_error(self, line):
request_id = re.search(r' \[([A-Z0-9\-/]+)', line)
if not request_id:
raise Exception("Unable to parse request id from %s" % line)
request_id = request_id.group(1)
data = self._errors.get(request_id, Request())
if self.is_env_line(line):
data.add_environment_line(line)
elif self.is_error_line(line):
data.add_error_line(line)
else:
# possibly error from twisted
data.add_error_line(line)
self.last_error = data
self._errors[request_id] = data
def is_env_line(self, line):
return re.search('Environment: ', line)
def is_error_line(self, line):
return re.search('raised', line)
def update_spool(self, directory):
processed_dir = "%s/processed" % directory
self._ensure_dir_exists(processed_dir)
for rid, value in self._errors.iteritems():
if not self.has_been_processed(processed_dir, rid):
with open("%s/%s" % (directory, rid), "w") as spool:
spool.write(json.dumps(value.to_dict()))
self.flush_old_processed_spool(processed_dir)
def _ensure_dir_exists(self, d):
mkdir = False
try:
os.stat(d)
except:
mkdir = True
if mkdir:
os.mkdir(d)
def has_been_processed(self, processed_dir, rid):
rv = False
try:
os.stat("%s/%s" % (processed_dir, rid))
rv = True
except:
pass
return rv
def flush_old_processed_spool(self, processed_dir):
keys = self._errors.keys()
procs = os.listdir(processed_dir)
for p in procs:
if p not in keys:
# log has rotated and the old error won't be seen again
os.unlink("%s/%s" % (processed_dir, p))
if __name__ == '__main__':
filename = '/var/log/nova.log'
spooldir = '/var/spool/nova'
if len(sys.argv) > 1:
filename = sys.argv[1]
if len(sys.argv) > 2:
spooldir = sys.argv[2]
LogReader(filename).process(spooldir)
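To make the parsing above concrete, here is a hedged example with an invented syslog-style line; the exact log format is an assumption, not taken from the commit. "#012" is syslog's escaped newline, which is how Request.add_error_line recovers folded traceback lines.

# Hypothetical sample line and the same slicing Request.add_error_line uses.
line = ("Jan 18 18:29:56 host1 (nova.api 2716): "
        "ERROR [RID-1 admin myproj] NotFound raised"
        "#012Traceback (most recent call last):"
        "#012TRACE:   File \"api.py\", line 10, in show")

print " ".join(line.split(" ")[:3])        # time:       Jan 18 18:29:56
print line.split(" ")[3]                   # host:       host1
print line.split("(")[1].split(" ")[0]     # logger:     nova.api
print line.split("[")[1].split(" ")[0]     # request_id: RID-1
parts = line.split("#012")
print parts[0].split('] ')[-1]             # message:    NotFound raised
print [p.split('TRACE: ')[-1] for p in parts[1:]]   # traceback lines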


@ -55,8 +55,8 @@
import datetime
import gettext
import logging
import os
import re
import sys
import time
@ -77,18 +77,22 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import quota
from nova import utils
from nova.auth import manager
from nova.cloudpipe import pipelib
from nova.db import migration
logging.basicConfig()
FLAGS = flags.FLAGS
flags.DECLARE('fixed_range', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
class VpnCommands(object):
@ -333,6 +337,11 @@ class ProjectCommands(object):
arguments: name project_manager [description]"""
self.manager.create_project(name, project_manager, description)
def modify(self, name, project_manager, description=None):
"""Modifies a project
arguments: name project_manager [description]"""
self.manager.modify_project(name, project_manager, description)
def delete(self, name):
"""Deletes an existing project
arguments: name"""
@ -432,11 +441,12 @@ class NetworkCommands(object):
"""Class for managing networks."""
def create(self, fixed_range=None, num_networks=None,
network_size=None, vlan_start=None, vpn_start=None):
network_size=None, vlan_start=None, vpn_start=None,
fixed_range_v6=None):
"""Creates fixed ips for host by range
arguments: [fixed_range=FLAG], [num_networks=FLAG],
[network_size=FLAG], [vlan_start=FLAG],
[vpn_start=FLAG]"""
[vpn_start=FLAG], [fixed_range_v6=FLAG]"""
if not fixed_range:
fixed_range = FLAGS.fixed_range
if not num_networks:
@ -447,11 +457,13 @@ class NetworkCommands(object):
vlan_start = FLAGS.vlan_start
if not vpn_start:
vpn_start = FLAGS.vpn_start
if not fixed_range_v6:
fixed_range_v6 = FLAGS.fixed_range_v6
net_manager = utils.import_object(FLAGS.network_manager)
net_manager.create_networks(context.get_admin_context(),
fixed_range, int(num_networks),
int(network_size), int(vlan_start),
int(vpn_start))
int(vpn_start), fixed_range_v6)
class ServiceCommands(object):
@ -499,6 +511,30 @@ class ServiceCommands(object):
db.service_update(ctxt, svc['id'], {'disabled': True})
class LogCommands(object):
def request(self, request_id, logfile='/var/log/nova.log'):
"""Show all fields in the log for the given request. Assumes you
haven't changed the log format too much.
ARGS: request_id [logfile]"""
lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id))
print re.sub('#012', "\n", "\n".join(lines))
class DbCommands(object):
"""Class for managing the database."""
def __init__(self):
pass
def sync(self, version=None):
"""Sync the database up to the most recent version."""
return migration.db_sync(version)
def version(self):
"""Print the current database version."""
print migration.db_version()
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@ -507,7 +543,9 @@ CATEGORIES = [
('vpn', VpnCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands),
('service', ServiceCommands)]
('service', ServiceCommands),
('log', LogCommands),
('db', DbCommands)]
def lazy_match(name, key_value_tuples):
@ -546,9 +584,6 @@ def main():
utils.default_flagfile()
argv = FLAGS(sys.argv)
if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
script_name = argv.pop(0)
if len(argv) < 1:
print script_name + " category action [<args>]"

bin/nova-spoolsentry Normal file

@ -0,0 +1,97 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import json
import logging
import os
import shutil
import sys
import urllib
import urllib2
try:
import cPickle as pickle
except:
import pickle
class SpoolSentry(object):
def __init__(self, spool_dir, sentry_url, key=None):
self.spool_dir = spool_dir
self.sentry_url = sentry_url
self.key = key
def process(self):
for fname in os.listdir(self.spool_dir):
if fname == "processed":
continue
try:
sourcefile = "%s/%s" % (self.spool_dir, fname)
with open(sourcefile) as f:
fdata = f.read()
data_from_json = json.loads(fdata)
data = self.build_data(data_from_json)
self.send_data(data)
destfile = "%s/processed/%s" % (self.spool_dir, fname)
shutil.move(sourcefile, destfile)
except:
logging.exception("Unable to upload record %s", fname)
raise
def build_data(self, filejson):
env = {'SERVER_NAME': 'unknown', 'SERVER_PORT': '0000',
'SCRIPT_NAME': '/unknown/', 'PATH_INFO': 'unknown'}
if filejson['env']:
env = json.loads(filejson['env'])
url = "http://%s:%s%s%s" % (env['SERVER_NAME'], env['SERVER_PORT'],
env['SCRIPT_NAME'], env['PATH_INFO'])
rv = {'logger': filejson['logger'], 'level': logging.ERROR,
'server_name': filejson['host'], 'url': url,
'message': filejson['message'],
'traceback': filejson['traceback']}
rv['data'] = {}
if filejson['env']:
rv['data']['META'] = env
if filejson['request_id']:
rv['data']['request_id'] = filejson['request_id']
return rv
def send_data(self, data):
data = {
'data': base64.b64encode(pickle.dumps(data).encode('zlib')),
'key': self.key
}
req = urllib2.Request(self.sentry_url)
res = urllib2.urlopen(req, urllib.urlencode(data))
if res.getcode() != 200:
raise Exception("Bad HTTP code: %s" % res.getcode())
txt = res.read()
if __name__ == '__main__':
sentryurl = 'http://127.0.0.1/sentry/store/'
key = ''
spooldir = '/var/spool/nova'
if len(sys.argv) > 1:
sentryurl = sys.argv[1]
if len(sys.argv) > 2:
key = sys.argv[2]
if len(sys.argv) > 3:
spooldir = sys.argv[3]
SpoolSentry(spooldir, sentryurl, key).process()
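The 'data' field that send_data posts is a pickled, zlib-compressed, base64-encoded record. A small round-trip sketch of that encoding follows; it only demonstrates the wire format used above, and the record contents are made up.

import base64
import cPickle as pickle

record = {'logger': 'nova.api', 'message': 'NotFound raised'}

# Same three steps as SpoolSentry.send_data: pickle, zlib-compress, base64.
wire = base64.b64encode(pickle.dumps(record).encode('zlib'))

# A Sentry-style receiver would reverse them.
decoded = pickle.loads(base64.b64decode(wire).decode('zlib'))
print decoded == record   # True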

bin/stack Executable file

@ -0,0 +1,145 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""CLI for the Direct API."""
import eventlet
eventlet.monkey_patch()
import os
import pprint
import sys
import textwrap
import urllib
import urllib2
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
import gflags
from nova import utils
FLAGS = gflags.FLAGS
gflags.DEFINE_string('host', '127.0.0.1', 'Direct API host')
gflags.DEFINE_integer('port', 8001, 'Direct API host')
gflags.DEFINE_string('user', 'user1', 'Direct API username')
gflags.DEFINE_string('project', 'proj1', 'Direct API project')
USAGE = """usage: stack [options] <controller> <method> [arg1=value arg2=value]
`stack help` should output the list of available controllers
`stack <controller>` should output the available methods for that controller
`stack help <controller>` should do the same
`stack help <controller> <method>` should output info for a method
"""
def format_help(d):
"""Format help text, keys are labels and values are descriptions."""
indent = max([len(k) for k in d])
out = []
for k, v in d.iteritems():
t = textwrap.TextWrapper(initial_indent=' %s ' % k.ljust(indent),
subsequent_indent=' ' * (indent + 6))
out.extend(t.wrap(v))
return out
def help_all():
rv = do_request('reflect', 'get_controllers')
out = format_help(rv)
return (USAGE + str(FLAGS.MainModuleHelp()) +
'\n\nAvailable controllers:\n' +
'\n'.join(out) + '\n')
def help_controller(controller):
rv = do_request('reflect', 'get_methods')
methods = dict([(k.split('/')[2], v) for k, v in rv.iteritems()
if k.startswith('/%s' % controller)])
return ('Available methods for %s:\n' % controller +
'\n'.join(format_help(methods)))
def help_method(controller, method):
rv = do_request('reflect',
'get_method_info',
{'method': '/%s/%s' % (controller, method)})
sig = '%s(%s):' % (method, ', '.join(['='.join(x) for x in rv['args']]))
out = textwrap.wrap(sig, subsequent_indent=' ' * len('%s(' % method))
out.append('\n' + rv['doc'])
return '\n'.join(out)
def do_request(controller, method, params=None):
if params:
data = urllib.urlencode(params)
else:
data = None
url = 'http://%s:%s/%s/%s' % (FLAGS.host, FLAGS.port, controller, method)
headers = {'X-OpenStack-User': FLAGS.user,
'X-OpenStack-Project': FLAGS.project}
req = urllib2.Request(url, data, headers)
resp = urllib2.urlopen(req)
return utils.loads(resp.read())
if __name__ == '__main__':
args = FLAGS(sys.argv)
cmd = args.pop(0)
if not args:
print help_all()
sys.exit()
first = args.pop(0)
if first == 'help':
action = help_all
params = []
if args:
params.append(args.pop(0))
action = help_controller
if args:
params.append(args.pop(0))
action = help_method
print action(*params)
sys.exit(0)
controller = first
if not args:
print help_controller(controller)
sys.exit()
method = args.pop(0)
params = {}
for x in args:
key, value = x.split('=', 1)
params[key] = value
pprint.pprint(do_request(controller, method, params))


@ -0,0 +1,37 @@
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Amazon's EC2
"""
from boto_v6.ec2.connection import EC2ConnectionV6
return EC2ConnectionV6(aws_access_key_id, aws_secret_access_key, **kwargs)


@ -0,0 +1,41 @@
'''
Created on 2010/12/20
@author: Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
'''
import boto
import boto.ec2
from boto_v6.ec2.instance import ReservationV6
class EC2ConnectionV6(boto.ec2.EC2Connection):
'''
EC2Connection for OpenStack IPV6 mode
'''
def get_all_instances(self, instance_ids=None, filters=None):
"""
Retrieve all the instances associated with your account.
:type instance_ids: list
:param instance_ids: A list of strings of instance IDs
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.instance.Reservation`
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeInstancesV6', params,
[('item', ReservationV6)])


@ -0,0 +1,37 @@
'''
Created on 2010/12/20
@author: Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
'''
import boto
from boto.resultset import ResultSet
from boto.ec2.instance import Reservation
from boto.ec2.instance import Group
from boto.ec2.instance import Instance
class ReservationV6(Reservation):
def startElement(self, name, attrs, connection):
if name == 'instancesSet':
self.instances = ResultSet([('item', InstanceV6)])
return self.instances
elif name == 'groupSet':
self.groups = ResultSet([('item', Group)])
return self.groups
else:
return None
class InstanceV6(Instance):
def __init__(self, connection=None):
Instance.__init__(self, connection)
self.dns_name_v6 = None
def endElement(self, name, value, connection):
Instance.endElement(self, name, value, connection)
if name == 'dnsNameV6':
self.dns_name_v6 = value
def _update(self, updated):
self.__dict__.update(updated.__dict__)
self.dns_name_v6 = updated.dns_name_v6
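A minimal usage sketch, added here for illustration and not part of the commit: connecting to a nova EC2 endpoint with the IPv6-aware connection class and reading the extra dns_name_v6 field that InstanceV6 parses from dnsNameV6. The endpoint address and credentials are made up; the port and path follow the ec2_url shown in the install guide later in this diff.

import boto
from boto.ec2.regioninfo import RegionInfo
from boto_v6.ec2.connection import EC2ConnectionV6

# Hypothetical endpoint and credentials.
region = RegionInfo(name='nova', endpoint='172.16.0.1')
conn = EC2ConnectionV6(aws_access_key_id='admin:myproject',
                       aws_secret_access_key='secret',
                       is_secure=False, region=region,
                       port=8773, path='/services/Cloud')

for reservation in conn.get_all_instances():
    for instance in reservation.instances:
        # dns_name_v6 is the field InstanceV6 adds on top of boto's Instance.
        print instance.id, instance.dns_name_v6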


@ -78,13 +78,22 @@ if [ "$CMD" == "install" ]; then
sudo apt-get install -y user-mode-linux kvm libvirt-bin
sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
sudo apt-get install -y lvm2 iscsitarget open-iscsi
sudo apt-get install -y socat
echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
sudo /etc/init.d/iscsitarget restart
sudo modprobe kvm
sudo /etc/init.d/libvirt-bin restart
sudo modprobe nbd
sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot
sudo apt-get install -y python-daemon python-eventlet python-gflags python-tornado python-ipy
sudo apt-get install -y python-libvirt python-libxml2 python-routes
sudo apt-get install -y python-daemon python-eventlet python-gflags python-ipy
sudo apt-get install -y python-libvirt python-libxml2 python-routes python-cheetah
#For IPV6
sudo apt-get install -y python-netaddr
sudo apt-get install -y radvd
#(Nati) Note that this configuration is only needed for nova-network node.
sudo bash -c "echo 1 > /proc/sys/net/ipv6/conf/all/forwarding"
sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"
if [ "$USE_MYSQL" == 1 ]; then
cat <<MYSQL_PRESEED | debconf-set-selections
mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
@ -106,6 +115,8 @@ function screen_it {
if [ "$CMD" == "run" ]; then
killall dnsmasq
#For IPv6
killall radvd
screen -d -m -S nova -t nova
sleep 1
if [ "$USE_MYSQL" == 1 ]; then
@ -155,6 +166,7 @@ if [ "$CMD" == "run" ]; then
screen_it network "$NOVA_DIR/bin/nova-network"
screen_it scheduler "$NOVA_DIR/bin/nova-scheduler"
screen_it volume "$NOVA_DIR/bin/nova-volume"
screen_it ajax_console_proxy "$NOVA_DIR/bin/nova-ajax-console-proxy"
screen_it test ". $NOVA_DIR/novarc"
screen -S nova -x
fi


@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.


@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.


@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.


@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.


@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.


@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.


@ -1,7 +1,7 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
Overview Sections Copyright 2010 Citrix
Overview Sections Copyright 2010-2011 Citrix
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may


@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.


@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.


@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.


@ -1,6 +1,7 @@
..
Copyright 2010 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -17,36 +18,35 @@
Installing Nova on Multiple Servers
===================================
When you move beyond evaluating the technology and into building an actual
production environment, you will need to know how to configure your datacenter
and how to deploy components across your clusters. This guide should help you
through that process.
You can install multiple nodes to increase performance and availability of the OpenStack Compute installation.
This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved in the installation and configuration scripts as of October 18th 2010. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
Requirements for a multi-node installation
------------------------------------------
* You need a real database, compatible with SQLAlchemy (MySQL, PostgreSQL). There's not a specific reason to choose one over the other; it basically depends on what you know. MySQL is easier to do High Availability (HA) with, but people may already know Postgres. We should document both configurations, though.
* For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies.
* For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL.
Assumptions
^^^^^^^^^^^
-----------
* Networking is configured between/through the physical machines on a single subnet.
* Installation and execution are both performed by root user.
* Installation and execution are both performed by ROOT user.
Step 1 - Use apt-get to get the latest code
-------------------------------------------
Step 1 Use apt-get to get the latest code
-----------------------------------------
1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk.
1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk. The python-software-properties package is a pre-requisite for setting up the nova package repo:
::
@ -69,201 +69,260 @@ Step 1 Use apt-get to get the latest code
It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!
Step 2 Setup configuration file (installed in /etc/nova)
---------------------------------------------------------
--------------------------------------------------------
1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf:
::
--daemonize=1
--dhcpbridge_flagfile=/etc/nova/nova.conf
--dhcpbridge=/usr/bin/nova-dhcpbridge
--logdir=/var/log/nova
--state_path=/var/lib/nova
The following items ALSO need to be defined in /etc/nova/nova.conf. I've added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly:
--sql_connection ### Location of Nova SQL DB
--s3_host ### This is where Nova is hosting the objectstore service, which will contain the VM images and buckets
--rabbit_host ### This is where the rabbit AMQP messaging service is hosted
--cc_host ### This is where the nova-api service lives
--verbose ### Optional but very helpful during initial setup
--ec2_url ### The location to interface nova-api
--network_manager ### Many options here, discussed below. This is how your controller will communicate with additional Nova nodes and VMs:
nova.network.manager.FlatManager # Simple, no-vlan networking type
nova.network.manager.FlatDHCPManager # Flat networking with DHCP
nova.network.manager.VlanManager # Vlan networking with DHCP /DEFAULT/ if no network manager is defined in nova.conf
--fixed_range=<network/prefix> ### This will be the IP network that ALL the projects for future VM guests will reside on. E.g. 192.168.0.0/12
--network_size=<# of addrs> ### This is the total number of IP Addrs to use for VM guests, of all projects. E.g. 5000
The following code can be cut and paste, and edited to your setup:
Note: CC_ADDR=<the external IP address of your cloud controller>
Nova development has consolidated all .conf files to nova.conf as of November 2010. References to specific .conf files may be ignored.
#. These need to be defined in the nova.conf configuration file::
--sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db
--s3_host=$CC_ADDR # This is where Nova is hosting the objectstore service, which
# will contain the VM images and buckets
--rabbit_host=$CC_ADDR # This is where the rabbit AMQP messaging service is hosted
--cc_host=$CC_ADDR # This is where the nova-api service lives
--verbose # Optional but very helpful during initial setup
--ec2_url=http://$CC_ADDR:8773/services/Cloud
--network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type
--fixed_range=<network/prefix> # ip network to use for VM guests, ex 192.168.2.64/26
--network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64
#. Create a nova group::
sudo addgroup nova
The Nova config file should have its owner set to root:nova, and mode set to 0640, since it contains your MySQL server's root password.
Detailed explanation of the following example is available above.
::
--sql_connection=mysql://root:nova@<CC_ADDR>/nova
--s3_host=<CC_ADDR>
--rabbit_host=<CC_ADDR>
--cc_host=<CC_ADDR>
--verbose
--ec2_url=http://<CC_ADDR>:8773/services/Cloud
--network_manager=nova.network.manager.VlanManager
--fixed_range=<network/prefix>
--network_size=<# of addrs>
2. Create a “nova” group, and set permissions::
cd /etc/nova
chown -R root:nova .
addgroup nova
The Nova config file should have its owner set to root:nova, and mode set to 0644, since it contains your MySQL server's root password. ::
Step 3 Setup the sql db
-----------------------
chown -R root:nova /etc/nova
chmod 644 /etc/nova/nova.conf
Step 3 - Setup the SQL DB (MySQL for this setup)
------------------------------------------------
1. First you 'preseed' to bypass all the installation prompts::
1. First you 'preseed' (using the Quick Start method :doc:`../quickstart`). Run this as root.
bash
MYSQL_PASS=nova
cat <<MYSQL_PRESEED | debconf-set-selections
mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
mysql-server-5.1 mysql-server/start_on_boot boolean true
MYSQL_PRESEED
2. Install MySQL::
apt-get install -y mysql-server
3. Edit /etc/mysql/my.cnf to change bind-address from localhost to any::
::
sudo apt-get install bzr git-core
sudo bash
export MYSQL_PASS=nova
::
cat <<MYSQL_PRESEED | debconf-set-selections
mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
mysql-server-5.1 mysql-server/start_on_boot boolean true
MYSQL_PRESEED
2. Install mysql
::
sudo apt-get install -y mysql-server
4. Edit /etc/mysql/my.cnf and set this line: bind-address=0.0.0.0 and then sighup or restart mysql
5. create nova's db
::
mysql -uroot -pnova -e 'CREATE DATABASE nova;'
6. Update the db to include user 'root'@'%'
::
mysql -u root -p nova
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
SET PASSWORD FOR 'root'@'%' = PASSWORD('nova');
7. Branch and install Nova
::
sudo -i
cd ~
export USE_MYSQL=1
export MYSQL_PASS=nova
git clone https://github.com/vishvananda/novascript.git
cd novascript
./nova.sh branch
./nova.sh install
./nova.sh run
Step 4 Setup Nova environment
-----------------------------
::
/usr/bin/python /usr/bin/nova-manage user admin <user_name>
/usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
/usr/bin/python /usr/bin/nova-manage project create network
Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.
On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected.
More networking details to create a network bridge for flat network
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. In my case, I wanted to keep things as simple as possible and have all the vm guests on the same network as the vm hosts (the compute nodes). Thus, I set the compute node's external IP address to be on the bridge and added eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
service mysql restart
3. Network Configuration
If you use FlatManager (as opposed to VlanManager that we set) as your network manager, there are some additional networking changes you'll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it's set up for you automatically.
Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
< begin /etc/network/interfaces >
# The loopback network interface
auto lo
iface lo inet loopback
# Networking for NOVA
auto br100
iface br100 inet dhcp
bridge_ports eth0
bridge_stp off
bridge_maxwait 0
bridge_fd 0
< end /etc/network/interfaces >
Next, restart networking to apply the changes::
sudo /etc/init.d/networking restart
sudo /etc/init.d/networking restart
4. MySQL DB configuration:
Create NOVA database::
Step 5: Create nova certs.
--------------------------
mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
Update the DB to include user 'root'@'%' with super user privileges::
Generate the certs as a zip file::
mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
Set mySQL root password::
mkdir creds
sudo /usr/bin/python /usr/bin/nova-manage project zip admin admin creds/nova.zip
mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
Step 4 - Setup Nova environment
-------------------------------
you can get the rc file more easily with::
These are the commands you run to set up a user and project::
sudo /usr/bin/python /usr/bin/nova-manage project env admin admin creds/novarc
/usr/bin/python /usr/bin/nova-manage user admin <user_name>
/usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
/usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project>
Here is an example of what this looks like with real data::
unzip them in your home directory, and add them to your environment::
/usr/bin/python /usr/bin/nova-manage user admin dub
/usr/bin/python /usr/bin/nova-manage project create dubproject dub
/usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255
(I chose a /24 since that falls inside my /12 range I set in fixed-range in nova.conf. Currently, there can only be one network, and I am using the max IPs available in a /24. You can choose to use any valid amount that you would like.)
Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.
On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.
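As a small illustration of the allocation convention noted above (this example is not in the original guide; it just uses the python-netaddr package that the install script pulls in, with a /24 chosen arbitrarily)::

    from netaddr import IPNetwork

    net = IPNetwork('192.168.0.0/24')
    print net.network      # 192.168.0.0   -> treated as the network address
    print net[1]           # 192.168.0.1   -> treated as the gateway
    print net.broadcast    # 192.168.0.255 -> treated as the broadcast address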
Step 5 - Create Nova certifications
-----------------------------------
1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and perform all the other assorted API functions.
unzip creds/nova.zip
echo ". creds/novarc" >> ~/.bashrc
~/.bashrc
::
Step 6 Restart all relevant services
------------------------------------
mkdir -p /root/creds
/usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip
2. Unzip them in your home directory, and add them to your environment.
Restart Libvirt::
::
sudo /etc/init.d/libvirt-bin restart
unzip /root/creds/novacreds.zip -d /root/creds/
cat /root/creds/novarc >> ~/.bashrc
source ~/.bashrc
Restart relevant nova services::
Step 6 - Restart all relevant services
--------------------------------------
sudo /etc/init.d/nova-compute restart
sudo /etc/init.d/nova-volume restart
Restart all six services in total, just to cover the entire spectrum::
libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
Step 7 - Closing steps, and cleaning up
---------------------------------------
.. todo:: do we still need the content below?
One of the most commonly missed configuration steps is allowing proper access to your VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs::
Bare-metal Provisioning Notes
-----------------------------
euca-authorize -P icmp -t -1:-1 default
euca-authorize -P tcp -p 22 default
To install the base operating system you can use PXE booting.
Another common issue is that you cannot ping or SSH to your instances after issuing the 'euca-authorize' commands. Something to look at is the number of 'dnsmasq' processes that are running. If you have a running instance, check to see that TWO 'dnsmasq' processes are running. If not, perform the following::
Types of Hosts
--------------
killall dnsmasq
service nova-network restart
A single machine in your cluster can act as one or more of the following types
of host:
Step 8 - Testing the installation
---------------------------------
Nova Services
You can then use `euca2ools` to test some items::
* Network
* Compute
* Volume
* API
* Objectstore
euca-describe-images
euca-describe-instances
If you have issues with the API key, you may need to re-source your creds file::
Other supporting services
. /root/creds/novarc
If you don't get any immediate errors, you're successfully making calls to your cloud!
* Message Queue
* Database (optional)
* Authentication database (optional)
Step 9 - Spinning up a VM for testing
-------------------------------------
Initial Setup
-------------
(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.)
* Networking
* Cloudadmin User Creation
The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.
Deployment Technologies
-----------------------
Download the image, and publish to your bucket:
Once you have machines with a base operating system installation, you can deploy
code and configuration with your favorite tools to specify which machines in
your cluster have which roles:
::
image="ttylinux-uec-amd64-12.1_2.6.35-22_1.tar.gz"
wget http://smoser.brickies.net/ubuntu/ttylinux-uec/$image
uec-publish-tarball $image mybucket
This will output three references: an "emi", an "eri", and an "eki" (image, ramdisk, and kernel, respectively). The emi is the one we use to launch instances, so take note of this.
Create a keypair to SSH to the server:
::
euca-add-keypair mykey > mykey.priv
chmod 0600 mykey.priv
Boot your instance:
::
euca-run-instances $emi -k mykey -t m1.tiny
($emi is replaced with the output from the previous command)
Checking status, and confirming communication:
Once you have booted the instance, you can check its status with the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM.
::
euca-describe-instances
Once the instance is in a "running" state, you can use your SSH key to connect:
::
ssh -i mykey.priv root@$ipaddress
When you are ready to terminate the instance, you may do so with the `euca-terminate-instances` command:
::
euca-terminate-instances $instance-id
You can determine the instance-id with `euca-describe-instances`; the format is "i-" followed by a series of letters and numbers, e.g. i-a4g9d.
For more information on creating your own custom (production-ready) instance images, please visit http://wiki.openstack.org/GettingImages.
Enjoy your new private cloud, and play responsibly!
* Puppet
* Chef

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@ -91,11 +91,10 @@ These do NOT have IP addresses in the host to protect host access.
Compute nodes have iptables/ebtables entries created per project and
instance to protect against IP/MAC address spoofing and ARP poisoning.
The network assignment to a project, and IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instances, and the security groups (described in Security Groups) when the instance wants to join. If this is the first instance to be created for the project, then Nova (the cloud controller) needs to find a network controller to be the network host for the project; it then sets up a private network by finding an unused VLAN id, an unused subnet, and then the controller assigns them to the project, it also assigns a name to the project's Linux bridge, and allocating a private IP within the project's subnet for the new instance.
The network assignment to a project, and the IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instance, and the security groups (described in Security Groups) the instance wants to join. If this is the first instance to be created for the project, then Nova (the cloud controller) needs to find a network controller to be the network host for the project; it then sets up a private network by finding an unused VLAN id and an unused subnet, assigns them to the project, assigns a name to the project's Linux bridge (br100, stored in the Nova database), and allocates a private IP within the project's subnet for the new instance.
If the instance the user wants to start is not the project's first, a subnet and a VLAN must have already been assigned to the project; therefore the system needs only to find an available IP address within the subnet and assign it to the new starting instance. If there is no private IP available within the subnet, an exception will be raised to the cloud controller, and the VM creation cannot proceed.
.. todo:: insert the name of the Linux bridge, is it always named bridge?
External Infrastructure
-----------------------

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@ -54,7 +54,7 @@ Cloud computing offers different service models depending on the capabilities a
The US-based National Institute of Standards and Technology offers definitions for cloud computing
and the service models that are emerging.
These definitions are summarized from http://csrc.nist.gov/groups/SNS/cloud-computing/.
These definitions are summarized from the `U.S. National Institute of Standards and Technology (NIST) cloud computing research group <http://csrc.nist.gov/groups/SNS/cloud-computing/>`_.
SaaS - Software as a Service
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -74,7 +74,6 @@ IaaS - Infrastructure as a Service
Provides infrastructure such as computer instances, network connections, and storage so that people
can run any software or operating system.
Types of Cloud Deployments
--------------------------
@ -87,4 +86,5 @@ A hybrid cloud can be a deployment model, as a composition of both public and pr
Work in the Clouds
------------------
.. todo:: What people have done/sample projects
What have people done with cloud computing? Cloud computing can help with large-scale computing needs or can lead to consolidation efforts by virtualizing servers to make more use of existing hardware (and possibly release old hardware from service). People also use cloud computing for collaboration because of its high availability through networked computers. Productivity suites for word processing, number crunching, email communications, and more are also available through cloud computing. Cloud computing also makes additional storage available to the cloud user, avoiding the need for additional hard drives on your desktop and enabling access to large data storage capacity online in the cloud.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@ -35,7 +35,8 @@ Contributing Code
To contribute code, sign up for a Launchpad account and sign a contributor license agreement,
available on the `OpenStack Wiki <http://wiki.openstack.org/CLA>`_. Once the CLA is signed you
can contribute code through the Bazaar version control system which is related to your Launchpad account.
can contribute code through the Bazaar version control system which is related to your Launchpad
account. See the :doc:`devref/development.environment` page to get started.
#openstack on Freenode IRC Network
----------------------------------

View File

@ -60,10 +60,12 @@ copyright = u'2010, United States Government as represented by the Administrator
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2011.1'
from nova import version as nova_version
#import nova.version
# The full version, including alpha/beta/rc tags.
release = '2011.1-prerelease'
release = nova_version.version_string()
# The short X.Y version.
version = nova_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 OpenStack LLC
Copyright 2010-2011 OpenStack LLC
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@ -60,4 +60,4 @@ Tests
-----
Tests are lacking for the db api layer and for the sqlalchemy driver.
Failures in the drivers would be dectected in other test cases, though.
Failures in the drivers would be detected in other test cases, though.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@ -88,7 +88,12 @@ Here's how to get the latest code::
source .nova_venv/bin/activate
./run_tests.sh
And then you can do cleaning work or hack hack hack with a branched named cleaning::
Then you can do cleaning work or hack hack hack with a branch named cleaning.
Contributing Your Work
----------------------
Once your work is complete you may wish to contribute it to the project. Add your name and email address to the `Authors` file, and also to the `.mailmap` file if you use multiple email addresses. Your contributions can not be merged into trunk unless you are listed in the Authors file. Now, push the branch to Launchpad::
bzr push lp:~launchpaduserid/nova/cleaning

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,6 @@
..
Copyright (c) 2010 Citrix Systems, Inc.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
@ -29,7 +30,7 @@ Nova (Austin release) uses both direct and topic-based exchanges. The architectu
..
Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshalling and unmarshalling of messages into function calls. Each Nova service (for example Compute, Volume, etc.) create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise is acts as publisher only.
Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which takes care of marshaling and unmarshaling of messages into function calls. Each Nova service (for example Compute, Volume, etc.) creates two queues at initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another which accepts messages with the generic routing key 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise it acts as publisher only.
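As a rough sketch of the two routing-key styles described above (the hostname and method names below are illustrative placeholders, not a recipe)::
from nova import context
from nova import flags
from nova import rpc
FLAGS = flags.FLAGS
ctxt = context.get_admin_context()
# rpc.cast: fire-and-forget to whichever compute worker picks the message
# up from the shared 'compute' topic exchange.
rpc.cast(ctxt,
         FLAGS.compute_topic,
         {"method": "refresh_security_group",
          "args": {"security_group_id": 1}})
# rpc.call: request/response addressed to one specific host via the
# 'compute.<hostname>' routing key; blocks until that worker replies.
output = rpc.call(ctxt,
                  '%s.%s' % (FLAGS.compute_topic, 'node-01'),
                  {"method": "get_console_output",
                   "args": {"instance_id": 1}})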
Nova RPC Mappings
-----------------
@ -39,7 +40,7 @@ The figure below shows the internals of a RabbitMQ node when a single instance i
Figure 2 shows the following internal elements:
* Topic Publisher: a Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher always connects to the same topic-based exchange; its life-cycle is limited to the message delivery.
* Direct Consumer: a Direct Consumer comes to life if (an only if) a rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshalled in the message sent by the Topic Publisher (only rpc.call operations).
* Direct Consumer: a Direct Consumer comes to life if (and only if) an rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations).
* Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host').
* Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message.
* Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a RabbitMQ node will have only one topic-based exchange for every topic in Nova.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,3 +1,20 @@
..
Copyright 2010-2011 OpenStack LLC
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Installing the Live CD
======================

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@ -105,7 +105,7 @@ It is important to know that there are user-specific (sometimes called global) r
For example: a user can access API commands allowed to the netadmin role (like allocate_address) only if they have the user-specific netadmin role AND the project-specific netadmin role.
More information about RBAC can be found in the :ref:`auth`.
More information about RBAC can be found in :ref:`auth`.
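A minimal sketch of that dual check (illustrative only; the real logic lives in nova.auth.manager)::
def has_role(global_roles, project_roles, role):
    # The role must be held both sitewide and within the project.
    return role in global_roles and role in project_roles
# allocate_address requires 'netadmin' in both scopes:
print(has_role({'netadmin'}, {'netadmin'}, 'netadmin'))   # True
print(has_role({'netadmin'}, {'developer'}, 'netadmin'))  # False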
Concept: API
------------
@ -159,12 +159,10 @@ vpn management, and much more.
See :doc:`nova.manage` in the Administration Guide for more details.
Concept: Flags
--------------
Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within a flag file. When you install Nova packages for the Austin release, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. In releases after Austin (which was released in October 2010), all flags are set in nova.conf.
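As a rough sketch of how the two halves fit together (the flag name below is made up purely for illustration), a service defines a flag in code and the operator overrides it in the flag file::
from nova import flags
FLAGS = flags.FLAGS
# 'example_greeting' is a hypothetical flag, defined the same way the real
# services define theirs (see the flags.DEFINE_* calls in nova/api/ec2).
flags.DEFINE_string('example_greeting', 'hello',
                    'a made-up flag used only for illustration')
# In a flag file such as nova.conf the operator overrides flags using
# gflags syntax, one flag per line, e.g.:
#   --example_greeting=bonjour
#   --verbose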
Concept: Plugins
----------------

View File

@ -1,3 +1,20 @@
..
Copyright 2010-2011 OpenStack LLC
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Object Model
============
@ -25,29 +42,38 @@ Object Model
Users
-----
Each Nova User is authorized based on their access key and secret key, assigned per-user. Read more at :doc:`/adminguide/managing.users`.
Projects
--------
For Nova, access to images is based on the project. Read more at :doc:`/adminguide/managing.projects`.
Images
------
Images are binary files that run the operating system. Read more at :doc:`/adminguide/managing.images`.
Instances
---------
Instances are running virtual servers. Read more at :doc:`/adminguide/managing.instances`.
Volumes
-------
.. todo:: Write doc about volumes
Security Groups
---------------
In Nova, a security group is a named collection of network access rules, like firewall policies. Read more at `Security Groups <http://nova.openstack.org/nova.concepts.html#concept-security-groups>`_.
VLANs
-----
VLAN is the default network mode for Nova. Read more at :doc:`/adminguide/network.vlan`.
IP Addresses
------------
Nova enables floating IP management.

View File

@ -1,5 +1,5 @@
..
Copyright 2010 United States Government as represented by the
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.

View File

@ -1,3 +1,20 @@
..
Copyright 2010-2011 OpenStack LLC
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Service Architecture
====================

View File

@ -1,9 +1,5 @@
[DEFAULT]
verbose = 1
ec2_port = 8773
ec2_address = 0.0.0.0
openstack_port = 8774
openstack_address = 0.0.0.0
#######
# EC2 #
@ -12,52 +8,80 @@ openstack_address = 0.0.0.0
[composite:ec2]
use = egg:Paste#urlmap
/: ec2versions
/services: ec2api
/services/Cloud: ec2cloud
/services/Admin: ec2admin
/latest: ec2metadata
/200: ec2metadata
/20: ec2metadata
/1.0: ec2metadata
[pipeline:ec2api]
pipeline = authenticate router authorizer ec2executor
[pipeline:ec2cloud]
pipeline = logrequest authenticate cloudrequest authorizer ec2executor
#pipeline = logrequest ec2lockout authenticate cloudrequest authorizer ec2executor
[pipeline:ec2admin]
pipeline = logrequest authenticate adminrequest authorizer ec2executor
[pipeline:ec2metadata]
pipeline = logrequest ec2md
[pipeline:ec2versions]
pipeline = logrequest ec2ver
[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory
[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory
[filter:authenticate]
paste.filter_factory = nova.api.ec2:authenticate_factory
paste.filter_factory = nova.api.ec2:Authenticate.factory
[filter:router]
paste.filter_factory = nova.api.ec2:router_factory
[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:adminrequest]
controller = nova.api.ec2.admin.AdminController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:authorizer]
paste.filter_factory = nova.api.ec2:authorizer_factory
paste.filter_factory = nova.api.ec2:Authorizer.factory
[app:ec2executor]
paste.app_factory = nova.api.ec2:executor_factory
paste.app_factory = nova.api.ec2:Executor.factory
[app:ec2versions]
paste.app_factory = nova.api.ec2:versions_factory
[app:ec2ver]
paste.app_factory = nova.api.ec2:Versions.factory
[app:ec2metadata]
paste.app_factory = nova.api.ec2.metadatarequesthandler:metadata_factory
[app:ec2md]
paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
#############
# Openstack #
#############
[composite:openstack]
[composite:osapi]
use = egg:Paste#urlmap
/: osversions
/v1.0: openstackapi
[pipeline:openstackapi]
pipeline = auth ratelimit osapi
pipeline = faultwrap auth ratelimit osapiapp
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
[filter:auth]
paste.filter_factory = nova.api.openstack.auth:auth_factory
paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.ratelimiting:ratelimit_factory
paste.filter_factory = nova.api.openstack.ratelimiting:RateLimitingMiddleware.factory
[app:osapi]
paste.app_factory = nova.api.openstack:router_factory
[app:osapiapp]
paste.app_factory = nova.api.openstack:APIRouter.factory
[app:osversions]
paste.app_factory = nova.api.openstack:versions_factory
[pipeline:osversions]
pipeline = faultwrap osversionapp
[app:osversionapp]
paste.app_factory = nova.api.openstack:Versions.factory

3
krm_mapping.json.sample Normal file
View File

@ -0,0 +1,3 @@
{
"machine" : ["kernel", "ramdisk"]
}

2130
locale/nova.pot Normal file

File diff suppressed because it is too large Load Diff

View File

@ -15,97 +15,5 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Root WSGI middleware for all API controllers.
**Related Flags**
:osapi_subdomain: subdomain running the OpenStack API (default: api)
:ec2api_subdomain: subdomain running the EC2 API (default: ec2)
"""
import logging
import routes
import webob.dec
from nova import flags
from nova import wsgi
from nova.api import ec2
from nova.api import openstack
from nova.api.ec2 import metadatarequesthandler
flags.DEFINE_string('osapi_subdomain', 'api',
'subdomain running the OpenStack API')
flags.DEFINE_string('ec2api_subdomain', 'ec2',
'subdomain running the EC2 API')
FLAGS = flags.FLAGS
class API(wsgi.Router):
"""Routes top-level requests to the appropriate controller."""
def __init__(self, default_api):
osapi_subdomain = {'sub_domain': [FLAGS.osapi_subdomain]}
ec2api_subdomain = {'sub_domain': [FLAGS.ec2api_subdomain]}
if default_api == 'os':
osapi_subdomain = {}
elif default_api == 'ec2':
ec2api_subdomain = {}
mapper = routes.Mapper()
mapper.sub_domains = True
mapper.connect("/", controller=self.osapi_versions,
conditions=osapi_subdomain)
mapper.connect("/v1.0/{path_info:.*}", controller=openstack.API(),
conditions=osapi_subdomain)
mapper.connect("/", controller=self.ec2api_versions,
conditions=ec2api_subdomain)
mapper.connect("/services/{path_info:.*}", controller=ec2.API(),
conditions=ec2api_subdomain)
mrh = metadatarequesthandler.MetadataRequestHandler()
for s in ['/latest',
'/2009-04-04',
'/2008-09-01',
'/2008-02-01',
'/2007-12-15',
'/2007-10-10',
'/2007-08-29',
'/2007-03-01',
'/2007-01-19',
'/1.0']:
mapper.connect('%s/{path_info:.*}' % s, controller=mrh,
conditions=ec2api_subdomain)
super(API, self).__init__(mapper)
@webob.dec.wsgify
def osapi_versions(self, req):
"""Respond to a request for all OpenStack API versions."""
response = {
"versions": [
dict(status="CURRENT", id="v1.0")]}
metadata = {
"application/xml": {
"attributes": dict(version=["status", "id"])}}
return wsgi.Serializer(req.environ, metadata).to_content_type(response)
@webob.dec.wsgify
def ec2api_versions(self, req):
"""Respond to a request for all EC2 versions."""
# available api versions
versions = [
'1.0',
'2007-01-19',
'2007-03-01',
'2007-08-29',
'2007-10-10',
'2007-12-15',
'2008-02-01',
'2008-09-01',
'2009-04-04',
]
return ''.join('%s\n' % v for v in versions)
"""No-op __init__ for directory full of api goodies."""

232
nova/api/direct.py Normal file
View File

@ -0,0 +1,232 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Public HTTP interface that allows services to self-register.
The general flow of a request is:
- Request is parsed into WSGI bits.
- Some middleware checks authentication.
- Routing takes place based on the URL to find a controller.
(/controller/method)
- Parameters are parsed from the request and passed to a method on the
controller as keyword arguments.
- Optionally 'json' is decoded to provide all the parameters.
- Actual work is done and a result is returned.
- That result is turned into json and returned.
"""
import inspect
import urllib
import routes
import webob
from nova import context
from nova import flags
from nova import utils
from nova import wsgi
ROUTES = {}
def register_service(path, handle):
ROUTES[path] = handle
class Router(wsgi.Router):
def __init__(self, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self._load_registered_routes(mapper)
super(Router, self).__init__(mapper=mapper)
def _load_registered_routes(self, mapper):
for route in ROUTES:
mapper.connect('/%s/{action}' % route,
controller=ServiceWrapper(ROUTES[route]))
class DelegatedAuthMiddleware(wsgi.Middleware):
def process_request(self, request):
os_user = request.headers['X-OpenStack-User']
os_project = request.headers['X-OpenStack-Project']
context_ref = context.RequestContext(user=os_user, project=os_project)
request.environ['openstack.context'] = context_ref
class JsonParamsMiddleware(wsgi.Middleware):
def process_request(self, request):
if 'json' not in request.params:
return
params_json = request.params['json']
params_parsed = utils.loads(params_json)
params = {}
for k, v in params_parsed.iteritems():
if k in ('self', 'context'):
continue
if k.startswith('_'):
continue
params[k] = v
request.environ['openstack.params'] = params
class PostParamsMiddleware(wsgi.Middleware):
def process_request(self, request):
params_parsed = request.params
params = {}
for k, v in params_parsed.iteritems():
if k in ('self', 'context'):
continue
if k.startswith('_'):
continue
params[k] = v
request.environ['openstack.params'] = params
class Reflection(object):
"""Reflection methods to list available methods."""
def __init__(self):
self._methods = {}
self._controllers = {}
def _gather_methods(self):
methods = {}
controllers = {}
for route, handler in ROUTES.iteritems():
controllers[route] = handler.__doc__.split('\n')[0]
for k in dir(handler):
if k.startswith('_'):
continue
f = getattr(handler, k)
if not callable(f):
continue
# bunch of ugly formatting stuff
argspec = inspect.getargspec(f)
args = [x for x in argspec[0]
if x != 'self' and x != 'context']
defaults = argspec[3] and argspec[3] or []
args_r = list(reversed(args))
defaults_r = list(reversed(defaults))
args_out = []
while args_r:
if defaults_r:
args_out.append((args_r.pop(0),
repr(defaults_r.pop(0))))
else:
args_out.append((str(args_r.pop(0)),))
# if the method accepts keywords
if argspec[2]:
args_out.insert(0, ('**%s' % argspec[2],))
methods['/%s/%s' % (route, k)] = {
'short_doc': f.__doc__.split('\n')[0],
'doc': f.__doc__,
'name': k,
'args': list(reversed(args_out))}
self._methods = methods
self._controllers = controllers
def get_controllers(self, context):
"""List available controllers."""
if not self._controllers:
self._gather_methods()
return self._controllers
def get_methods(self, context):
"""List available methods."""
if not self._methods:
self._gather_methods()
method_list = self._methods.keys()
method_list.sort()
methods = {}
for k in method_list:
methods[k] = self._methods[k]['short_doc']
return methods
def get_method_info(self, context, method):
"""Get detailed information about a method."""
if not self._methods:
self._gather_methods()
return self._methods[method]
class ServiceWrapper(wsgi.Controller):
def __init__(self, service_handle):
self.service_handle = service_handle
@webob.dec.wsgify
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
del arg_dict['action']
context = req.environ['openstack.context']
# allow middleware up the stack to override the params
params = {}
if 'openstack.params' in req.environ:
params = req.environ['openstack.params']
# TODO(termie): do some basic normalization on methods
method = getattr(self.service_handle, action)
result = method(context, **params)
if type(result) is dict or type(result) is list:
return self._serialize(result, req)
else:
return result
class Proxy(object):
"""Pretend a Direct API endpoint is an object."""
def __init__(self, app, prefix=None):
self.app = app
self.prefix = prefix
def __do_request(self, path, context, **kwargs):
req = webob.Request.blank(path)
req.method = 'POST'
req.body = urllib.urlencode({'json': utils.dumps(kwargs)})
req.environ['openstack.context'] = context
resp = req.get_response(self.app)
try:
return utils.loads(resp.body)
except Exception:
return resp.body
def __getattr__(self, key):
if self.prefix is None:
return self.__class__(self.app, prefix=key)
def _wrapper(context, **kwargs):
return self.__do_request('/%s/%s' % (self.prefix, key),
context,
**kwargs)
_wrapper.func_name = key
return _wrapper

View File

@ -20,7 +20,7 @@ Starting point for routing EC2 requests.
"""
import logging
import datetime
import routes
import webob
import webob.dec
@ -29,19 +29,18 @@ import webob.exc
from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova import wsgi
from nova.api.ec2 import apirequest
from nova.api.ec2 import admin
from nova.api.ec2 import cloud
from nova.auth import manager
FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.api")
flags.DEFINE_boolean('use_forwarded_for', False,
'Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.')
flags.DEFINE_boolean('use_lockout', False,
'Whether or not to use lockout middleware.')
flags.DEFINE_integer('lockout_attempts', 5,
'Number of failed auths before lockout.')
flags.DEFINE_integer('lockout_minutes', 15,
@ -52,17 +51,42 @@ flags.DEFINE_list('lockout_memcached_servers', None,
'Memcached servers or None for in process cache.')
_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
@webob.dec.wsgify
def __call__(self, req):
rv = req.get_response(self.application)
self.log_request_completion(rv, req)
return rv
class API(wsgi.Middleware):
"""Routing for all EC2 API requests."""
def __init__(self):
self.application = Authenticate(Router(Authorizer(Executor())))
if FLAGS.use_lockout:
self.application = Lockout(self.application)
def log_request_completion(self, response, request):
controller = request.environ.get('ec2.controller', None)
if controller:
controller = controller.__class__.__name__
action = request.environ.get('ec2.action', None)
ctxt = request.environ.get('ec2.context', None)
seconds = 'X'
microseconds = 'X'
if ctxt:
delta = datetime.datetime.utcnow() - \
ctxt.timestamp
seconds = delta.seconds
microseconds = delta.microseconds
LOG.info(
"%s.%ss %s %s %s %s:%s %s [%s] %s %s",
seconds,
microseconds,
request.remote_addr,
request.method,
request.path_info,
controller,
action,
response.status_int,
request.user_agent,
request.content_type,
response.content_type,
context=ctxt)
class Lockout(wsgi.Middleware):
@ -98,7 +122,7 @@ class Lockout(wsgi.Middleware):
failures_key = "authfailures-%s" % access_key
failures = int(self.mc.get(failures_key) or 0)
if failures >= FLAGS.lockout_attempts:
detail = "Too many failed authentications."
detail = _("Too many failed authentications.")
raise webob.exc.HTTPForbidden(detail=detail)
res = req.get_response(self.application)
if res.status_int == 403:
@ -107,9 +131,9 @@ class Lockout(wsgi.Middleware):
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
elif failures >= FLAGS.lockout_attempts:
_log.warn('Access key %s has had %d failed authentications'
' and will be locked out for %d minutes.' %
(access_key, failures, FLAGS.lockout_minutes))
LOG.warn(_('Access key %s has had %d failed authentications'
' and will be locked out for %d minutes.'),
access_key, failures, FLAGS.lockout_minutes)
self.mc.set(failures_key, str(failures),
time=FLAGS.lockout_minutes * 60)
return res
@ -142,8 +166,9 @@ class Authenticate(wsgi.Middleware):
req.method,
req.host,
req.path)
except exception.Error, ex:
logging.debug(_("Authentication Failure: %s") % ex)
# Be explicit for what exceptions are 403, the rest bubble as 500
except (exception.NotFound, exception.NotAuthorized) as ex:
LOG.audit(_("Authentication Failure: %s"), str(ex))
raise webob.exc.HTTPForbidden()
# Authenticated!
@ -154,29 +179,19 @@ class Authenticate(wsgi.Middleware):
project=project,
remote_address=remote_address)
req.environ['ec2.context'] = ctxt
LOG.audit(_('Authenticated Request For %s:%s)'), user.name,
project.name, context=req.environ['ec2.context'])
return self.application
class Router(wsgi.Middleware):
class Requestify(wsgi.Middleware):
"""Add ec2.'controller', .'action', and .'action_args' to WSGI environ."""
def __init__(self, application):
super(Router, self).__init__(application)
self.map = routes.Mapper()
self.map.connect("/{controller_name}/")
self.controllers = dict(Cloud=cloud.CloudController(),
Admin=admin.AdminController())
def __init__(self, app, controller):
super(Requestify, self).__init__(app)
self.controller = utils.import_class(controller)()
@webob.dec.wsgify
def __call__(self, req):
# Obtain the appropriate controller and action for this request.
try:
match = self.map.match(req.path_info)
controller_name = match['controller_name']
controller = self.controllers[controller_name]
except:
raise webob.exc.HTTPNotFound()
non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Version', 'Timestamp']
args = dict(req.params)
@ -189,13 +204,13 @@ class Router(wsgi.Middleware):
except:
raise webob.exc.HTTPBadRequest()
_log.debug(_('action: %s') % action)
LOG.debug(_('action: %s'), action)
for key, value in args.items():
_log.debug(_('arg: %s\t\tval: %s') % (key, value))
LOG.debug(_('arg: %s\t\tval: %s'), key, value)
# Success!
req.environ['ec2.controller'] = controller
req.environ['ec2.action'] = action
api_request = apirequest.APIRequest(self.controller, action, args)
req.environ['ec2.request'] = api_request
req.environ['ec2.action_args'] = args
return self.application
@ -256,13 +271,14 @@ class Authorizer(wsgi.Middleware):
@webob.dec.wsgify
def __call__(self, req):
context = req.environ['ec2.context']
controller_name = req.environ['ec2.controller'].__class__.__name__
action = req.environ['ec2.action']
allowed_roles = self.action_roles[controller_name].get(action,
['none'])
controller = req.environ['ec2.request'].controller.__class__.__name__
action = req.environ['ec2.request'].action
allowed_roles = self.action_roles[controller].get(action, ['none'])
if self._matches_any_role(context, allowed_roles):
return self.application
else:
LOG.audit(_("Unauthorized request for controller=%s "
"and action=%s"), controller, action, context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
@ -289,23 +305,28 @@ class Executor(wsgi.Application):
@webob.dec.wsgify
def __call__(self, req):
context = req.environ['ec2.context']
controller = req.environ['ec2.controller']
action = req.environ['ec2.action']
args = req.environ['ec2.action_args']
api_request = apirequest.APIRequest(controller, action)
api_request = req.environ['ec2.request']
result = None
try:
result = api_request.send(context, **args)
result = api_request.invoke(context)
except exception.NotFound as ex:
LOG.info(_('NotFound raised: %s'), str(ex), context=context)
return self._error(req, context, type(ex).__name__, str(ex))
except exception.ApiError as ex:
LOG.exception(_('ApiError raised: %s'), str(ex), context=context)
if ex.code:
return self._error(req, ex.code, ex.message)
return self._error(req, context, ex.code, str(ex))
else:
return self._error(req, type(ex).__name__, ex.message)
# TODO(vish): do something more useful with unknown exceptions
return self._error(req, context, type(ex).__name__, str(ex))
except Exception as ex:
return self._error(req, type(ex).__name__, str(ex))
extra = {'environment': req.environ}
LOG.exception(_('Unexpected error raised: %s'), str(ex),
extra=extra, context=context)
return self._error(req,
context,
'UnknownError',
_('An unknown error has occurred. '
'Please try your request again.'))
else:
resp = webob.Response()
resp.status = 200
@ -313,15 +334,16 @@ class Executor(wsgi.Application):
resp.body = str(result)
return resp
def _error(self, req, code, message):
logging.error("%s: %s", code, message)
def _error(self, req, context, code, message):
LOG.error("%s: %s", code, message, context=context)
resp = webob.Response()
resp.status = 400
resp.headers['Content-Type'] = 'text/xml'
resp.body = str('<?xml version="1.0"?>\n'
'<Response><Errors><Error><Code>%s</Code>'
'<Message>%s</Message></Error></Errors>'
'<RequestID>?</RequestID></Response>' % (code, message))
'<Response><Errors><Error><Code>%s</Code>'
'<Message>%s</Message></Error></Errors>'
'<RequestID>%s</RequestID></Response>' %
(code, message, context.request_id))
return resp
@ -343,29 +365,3 @@ class Versions(wsgi.Application):
'2009-04-04',
]
return ''.join('%s\n' % v for v in versions)
def authenticate_factory(global_args, **local_args):
def authenticator(app):
return Authenticate(app)
return authenticator
def router_factory(global_args, **local_args):
def router(app):
return Router(app)
return router
def authorizer_factory(global_args, **local_args):
def authorizer(app):
return Authorizer(app)
return authorizer
def executor_factory(global_args, **local_args):
return Executor()
def versions_factory(global_args, **local_args):
return Versions()

View File

@ -24,9 +24,13 @@ import base64
from nova import db
from nova import exception
from nova import log as logging
from nova.auth import manager
LOG = logging.getLogger('nova.api.ec2.admin')
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
@ -75,17 +79,18 @@ class AdminController(object):
return {'userSet':
[user_dict(u) for u in manager.AuthManager().get_users()]}
def register_user(self, _context, name, **_kwargs):
def register_user(self, context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
LOG.audit(_("Creating new user: %s"), name, context=context)
return user_dict(manager.AuthManager().create_user(name))
def deregister_user(self, _context, name, **_kwargs):
def deregister_user(self, context, name, **_kwargs):
"""Deletes a single user (NOT undoable.)
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
LOG.audit(_("Deleting user: %s"), name, context=context)
manager.AuthManager().delete_user(name)
return True
def describe_roles(self, context, project_roles=True, **kwargs):
@ -105,15 +110,27 @@ class AdminController(object):
operation='add', **kwargs):
"""Add or remove a role for a user and project."""
if operation == 'add':
if project:
LOG.audit(_("Adding role %s to user %s for project %s"), role,
user, project, context=context)
else:
LOG.audit(_("Adding sitewide role %s to user %s"), role, user,
context=context)
manager.AuthManager().add_role(user, role, project)
elif operation == 'remove':
if project:
LOG.audit(_("Removing role %s from user %s for project %s"),
role, user, project, context=context)
else:
LOG.audit(_("Removing sitewide role %s from user %s"), role,
user, context=context)
manager.AuthManager().remove_role(user, role, project)
else:
raise exception.ApiError('operation must be add or remove')
raise exception.ApiError(_('operation must be add or remove'))
return True
def generate_x509_for_user(self, _context, name, project=None, **kwargs):
def generate_x509_for_user(self, context, name, project=None, **kwargs):
"""Generates and returns an x509 certificate for a single user.
Is usually called from a client that will wrap this with
access and secret key info, and return a zip file.
@ -122,6 +139,8 @@ class AdminController(object):
project = name
project = manager.AuthManager().get_project(project)
user = manager.AuthManager().get_user(name)
LOG.audit(_("Getting x509 for user: %s on project: %s"), name,
project, context=context)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
def describe_project(self, context, name, **kwargs):
@ -137,6 +156,8 @@ class AdminController(object):
def register_project(self, context, name, manager_user, description=None,
member_users=None, **kwargs):
"""Creates a new project"""
LOG.audit(_("Create project %s managed by %s"), name, manager_user,
context=context)
return project_dict(
manager.AuthManager().create_project(
name,
@ -146,6 +167,7 @@ class AdminController(object):
def deregister_project(self, context, name):
"""Permanently deletes a project."""
LOG.audit(_("Delete project: %s"), name, context=context)
manager.AuthManager().delete_project(name)
return True
@ -159,11 +181,15 @@ class AdminController(object):
**kwargs):
"""Add or remove a user from a project."""
if operation == 'add':
LOG.audit(_("Adding user %s to project %s"), user, project,
context=context)
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
LOG.audit(_("Removing user %s from project %s"), user, project,
context=context)
manager.AuthManager().remove_from_project(user, project)
else:
raise exception.ApiError('operation must be add or remove')
raise exception.ApiError(_('operation must be add or remove'))
return True
# FIXME(vish): these host commands don't work yet, perhaps some of the

View File

@ -20,13 +20,13 @@
APIRequest class
"""
import logging
import re
# TODO(termie): replace minidom with etree
from xml.dom import minidom
_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)
from nova import log as logging
LOG = logging.getLogger("nova.api.request")
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@ -83,24 +83,25 @@ def _try_convert(value):
class APIRequest(object):
def __init__(self, controller, action):
def __init__(self, controller, action, args):
self.controller = controller
self.action = action
self.args = args
def send(self, context, **kwargs):
def invoke(self, context):
try:
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
_error = _('Unsupported API request: controller = %s,'
'action = %s') % (self.controller, self.action)
_log.warning(_error)
LOG.exception(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
raise Exception(_error)
args = {}
for key, value in kwargs.items():
for key, value in self.args.items():
parts = key.split(".")
key = _camelcase_to_underscore(parts[0])
if isinstance(value, str) or isinstance(value, unicode):
@ -142,7 +143,7 @@ class APIRequest(object):
response = xml.toxml()
xml.unlink()
_log.debug(response)
LOG.debug(response)
return response
def _render_dict(self, xml, el, data):
@ -151,7 +152,7 @@ class APIRequest(object):
val = data[key]
el.appendChild(self._render_data(xml, key, val))
except:
_log.debug(data)
LOG.debug(data)
raise
def _render_data(self, xml, el_name, data):

View File

@ -24,26 +24,28 @@ datastore.
import base64
import datetime
import logging
import re
import os
from nova import context
import IPy
import os
import urllib
from nova import compute
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import network
from nova import rpc
from nova import utils
from nova import volume
from nova.compute import instance_types
FLAGS = flags.FLAGS
flags.DECLARE('service_down_time', 'nova.scheduler.driver')
LOG = logging.getLogger("nova.api.cloud")
InvalidInputException = exception.InvalidInputException
@ -72,17 +74,13 @@ def _gen_key(context, user_id, key_name):
def ec2_id_to_id(ec2_id):
"""Convert an ec2 ID (i-[base 36 number]) to an instance id (int)"""
return int(ec2_id[2:], 36)
"""Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
return int(ec2_id.split('-')[-1], 16)
def id_to_ec2_id(instance_id):
"""Convert an instance ID (int) to an ec2 ID (i-[base 36 number])"""
digits = []
while instance_id != 0:
instance_id, remainder = divmod(instance_id, 36)
digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder])
return "i-%s" % ''.join(reversed(digits))
def id_to_ec2_id(instance_id, template='i-%08x'):
"""Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
return template % instance_id
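# Round-trip illustration of the two helpers above (values shown are what
# the hex template produces):
#   id_to_ec2_id(10)              -> 'i-0000000a'
#   id_to_ec2_id(10, 'ami-%08x')  -> 'ami-0000000a'
#   ec2_id_to_id('i-0000000a')    -> 10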
class CloudController(object):
@ -94,8 +92,11 @@ class CloudController(object):
self.image_service = utils.import_object(FLAGS.image_service)
self.network_api = network.API()
self.volume_api = volume.API()
self.compute_api = compute.API(self.image_service, self.network_api,
self.volume_api)
self.compute_api = compute.API(
network_api=self.network_api,
image_service=self.image_service,
volume_api=self.volume_api,
hostname_factory=id_to_ec2_id)
self.setup()
def __str__(self):
@ -131,14 +132,11 @@ class CloudController(object):
result[key] = [line]
return result
def _trigger_refresh_security_group(self, context, security_group):
nodes = set([instance['host'] for instance in security_group.instances
if instance['host'] is not None])
for node in nodes:
rpc.cast(context,
'%s.%s' % (FLAGS.compute_topic, node),
{"method": "refresh_security_group",
"args": {"security_group_id": security_group.id}})
def _get_availability_zone_by_host(self, context, host):
services = db.service_get_all_by_host(context, host)
if len(services) > 0:
return services[0]['availability_zone']
return 'unknown zone'
def get_metadata(self, address):
ctxt = context.get_admin_context()
@ -152,6 +150,8 @@ class CloudController(object):
else:
keys = ''
hostname = instance_ref['hostname']
host = instance_ref['host']
availability_zone = self._get_availability_zone_by_host(ctxt, host)
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
ec2_id = id_to_ec2_id(instance_ref['id'])
@ -174,8 +174,7 @@ class CloudController(object):
'local-hostname': hostname,
'local-ipv4': address,
'kernel-id': instance_ref['kernel_id'],
# TODO(vish): real zone
'placement': {'availability-zone': 'nova'},
'placement': {'availability-zone': availability_zone},
'public-hostname': hostname,
'public-ipv4': floating_ip or '',
'public-keys': keys,
@ -199,15 +198,33 @@ class CloudController(object):
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
return {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
enabled_services = db.service_get_all(context)
disabled_services = db.service_get_all(context, True)
available_zones = []
for zone in [service.availability_zone for service
in enabled_services]:
if not zone in available_zones:
available_zones.append(zone)
not_available_zones = []
for zone in [service.availability_zone for service in disabled_services
if not service['availability_zone'] in available_zones]:
if not zone in not_available_zones:
not_available_zones.append(zone)
result = []
for zone in available_zones:
result.append({'zoneName': zone,
'zoneState': "available"})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
def _describe_availability_zones_verbose(self, context, **kwargs):
rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
services = db.service_get_all(context)
now = db.get_time()
now = datetime.datetime.utcnow()
hosts = []
for host in [service['host'] for service in services]:
if not host in hosts:
@ -237,16 +254,17 @@ class CloudController(object):
name, _sep, host = region.partition('=')
endpoint = '%s://%s:%s%s' % (FLAGS.ec2_prefix,
host,
FLAGS.cc_port,
FLAGS.ec2_port,
FLAGS.ec2_suffix)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
FLAGS.cc_host,
FLAGS.cc_port,
FLAGS.ec2_host,
FLAGS.ec2_port,
FLAGS.ec2_suffix)}]
return {'regionInfo': regions}
def describe_snapshots(self,
context,
@ -282,6 +300,7 @@ class CloudController(object):
return {'keypairsSet': result}
def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context)
data = _gen_key(context, context.user.id, key_name)
return {'keyName': key_name,
'keyFingerprint': data['fingerprint'],
@ -289,6 +308,7 @@ class CloudController(object):
# TODO(vish): when context is no longer an object, pass it here
def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
db.key_pair_destroy(context, context.user.id, key_name)
except exception.NotFound:
@ -349,6 +369,7 @@ class CloudController(object):
values['group_id'] = source_security_group['id']
elif cidr_ip:
# If this fails, it throws an exception. This is what we want.
cidr_ip = urllib.unquote(cidr_ip).decode()
IPy.IP(cidr_ip)
values['cidr'] = cidr_ip
else:
@ -395,6 +416,8 @@ class CloudController(object):
return False
def revoke_security_group_ingress(self, context, group_name, **kwargs):
LOG.audit(_("Revoke security group ingress %s"), group_name,
context=context)
self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
@ -402,8 +425,8 @@ class CloudController(object):
criteria = self._revoke_rule_args_to_dict(context, **kwargs)
if criteria == None:
raise exception.ApiError(_("No rule for the specified "
"parameters."))
raise exception.ApiError(_("Not enough parameters to build a "
"valid rule."))
for rule in security_group.rules:
match = True
@ -412,7 +435,8 @@ class CloudController(object):
match = False
if match:
db.security_group_rule_destroy(context, rule['id'])
self._trigger_refresh_security_group(context, security_group)
self.compute_api.trigger_security_group_rules_refresh(context,
security_group['id'])
return True
raise exception.ApiError(_("No rule for the specified parameters."))
@ -421,12 +445,17 @@ class CloudController(object):
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name, **kwargs):
LOG.audit(_("Authorize security group ingress %s"), group_name,
context=context)
self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
values = self._revoke_rule_args_to_dict(context, **kwargs)
if values is None:
raise exception.ApiError(_("Not enough parameters to build a "
"valid rule."))
values['parent_group_id'] = security_group.id
if self._security_group_rule_exists(security_group, values):
@ -435,7 +464,8 @@ class CloudController(object):
security_group_rule = db.security_group_rule_create(context, values)
self._trigger_refresh_security_group(context, security_group)
self.compute_api.trigger_security_group_rules_refresh(context,
security_group['id'])
return True
@ -457,6 +487,7 @@ class CloudController(object):
return source_project_id
def create_security_group(self, context, group_name, group_description):
LOG.audit(_("Create Security Group %s"), group_name, context=context)
self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError(_('group %s already exists') % group_name)
@ -471,6 +502,7 @@ class CloudController(object):
group_ref)]}
def delete_security_group(self, context, group_name, **kwargs):
LOG.audit(_("Delete security group %s"), group_name, context=context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
@ -478,22 +510,26 @@ class CloudController(object):
return True
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
# instance_id is passed in as a list of instances
ec2_id = instance_id[0]
instance_id = ec2_id_to_id(ec2_id)
instance_ref = self.compute_api.get(context, instance_id)
output = rpc.call(context,
'%s.%s' % (FLAGS.compute_topic,
instance_ref['host']),
{"method": "get_console_output",
"args": {"instance_id": instance_ref['id']}})
output = self.compute_api.get_console_output(
context, instance_id=instance_id)
now = datetime.datetime.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"output": base64.b64encode(output)}
def get_ajax_console(self, context, instance_id, **kwargs):
ec2_id = instance_id[0]
internal_id = ec2_id_to_id(ec2_id)
return self.compute_api.get_ajax_console(context, internal_id)
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volume_id = [ec2_id_to_id(x) for x in volume_id]
volumes = self.volume_api.get_all(context)
# NOTE(vish): volume_id is an optional list of volume ids to filter by.
volumes = [self._format_volume(context, v) for v in volumes
@ -509,7 +545,7 @@ class CloudController(object):
instance_data = '%s[%s]' % (instance_ec2_id,
volume['instance']['host'])
v = {}
v['volumeId'] = volume['id']
v['volumeId'] = id_to_ec2_id(volume['id'], 'vol-%08x')
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
@ -527,7 +563,8 @@ class CloudController(object):
'device': volume['mountpoint'],
'instanceId': instance_ec2_id,
'status': 'attached',
'volume_id': volume['ec2_id']}]
'volumeId': id_to_ec2_id(volume['id'],
'vol-%08x')}]
else:
v['attachmentSet'] = [{}]
@ -536,19 +573,22 @@ class CloudController(object):
return v
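The volume formatting relies on the ec2_id_to_id/id_to_ec2_id helpers defined earlier in this module (outside this hunk). Assuming they simply apply and parse a hex template, which is what the 'vol-%08x' argument suggests, the round trip looks like this sketch:
def id_to_ec2_id(internal_id, template='i-%08x'):
    # Assumed behaviour: render the integer id with the hex template.
    return template % internal_id

def ec2_id_to_id(ec2_id):
    # Assumed behaviour: parse the hex portion back into an integer.
    return int(ec2_id.split('-')[-1], 16)

assert id_to_ec2_id(5, 'vol-%08x') == 'vol-00000005'
assert ec2_id_to_id('vol-00000005') == 5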
def create_volume(self, context, size, **kwargs):
LOG.audit(_("Create volume of %s GB"), size, context=context)
volume = self.volume_api.create(context, size,
kwargs.get('display_name'),
kwargs.get('display_description'))
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
return {'volumeSet': [self._format_volume(context, dict(volume_ref))]}
return {'volumeSet': [self._format_volume(context, dict(volume))]}
def delete_volume(self, context, volume_id, **kwargs):
self.volume_api.delete(context, volume_id)
volume_id = ec2_id_to_id(volume_id)
self.volume_api.delete(context, volume_id=volume_id)
return True
def update_volume(self, context, volume_id, **kwargs):
volume_id = ec2_id_to_id(volume_id)
updatable_fields = ['display_name', 'display_description']
changes = {}
for field in updatable_fields:
@ -559,24 +599,33 @@ class CloudController(object):
return True
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
self.compute_api.attach_volume(context, instance_id, volume_id, device)
volume_id = ec2_id_to_id(volume_id)
instance_id = ec2_id_to_id(instance_id)
LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id,
instance_id, device, context=context)
self.compute_api.attach_volume(context,
instance_id=instance_id,
volume_id=volume_id,
device=device)
volume = self.volume_api.get(context, volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': instance_id,
'instanceId': id_to_ec2_id(instance_id),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': volume_id}
'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
def detach_volume(self, context, volume_id, **kwargs):
volume_id = ec2_id_to_id(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume = self.volume_api.get(context, volume_id)
instance = self.compute_api.detach_volume(context, volume_id)
instance = self.compute_api.detach_volume(context, volume_id=volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': id_to_ec2_id(instance['id']),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': volume_id}
'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@ -586,19 +635,32 @@ class CloudController(object):
return [{label: x} for x in lst]
def describe_instances(self, context, **kwargs):
return self._format_describe_instances(context)
return self._format_describe_instances(context, **kwargs)
def _format_describe_instances(self, context):
return {'reservationSet': self._format_instances(context)}
def describe_instances_v6(self, context, **kwargs):
kwargs['use_v6'] = True
return self._format_describe_instances(context, **kwargs)
def _format_describe_instances(self, context, **kwargs):
return {'reservationSet': self._format_instances(context, **kwargs)}
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id)
i = self._format_instances(context, reservation_id=reservation_id)
assert len(i) == 1
return i[0]
def _format_instances(self, context, **kwargs):
def _format_instances(self, context, instance_id=None, **kwargs):
# TODO(termie): this method is poorly named as its name does not imply
# that it will be making a variety of database calls
# rather than simply formatting a bunch of instances that
# were handed to it
reservations = {}
instances = self.compute_api.get_all(context, **kwargs)
# NOTE(vish): instance_id is an optional list of ids to filter by
if instance_id:
instance_id = [ec2_id_to_id(x) for x in instance_id]
instances = [self.compute_api.get(context, x) for x in instance_id]
else:
instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
@ -618,10 +680,16 @@ class CloudController(object):
if instance['fixed_ip']['floating_ips']:
fixed = instance['fixed_ip']
floating_addr = fixed['floating_ips'][0]['address']
if instance['fixed_ip']['network'] and 'use_v6' in kwargs:
i['dnsNameV6'] = utils.to_global_ipv6(
instance['fixed_ip']['network']['cidr_v6'],
instance['mac_address'])
i['privateDnsName'] = fixed_addr
i['publicDnsName'] = floating_addr
i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
i['keyName'] = instance['key_name']
if context.user.is_admin():
i['keyName'] = '%s (%s, %s)' % (i['keyName'],
instance['project_id'],
@ -632,6 +700,9 @@ class CloudController(object):
i['amiLaunchIndex'] = instance['launch_index']
i['displayName'] = instance['display_name']
i['displayDescription'] = instance['display_description']
host = instance['host']
zone = self._get_availability_zone_by_host(context, host)
i['placement'] = {'availabilityZone': zone}
if instance['reservation_id'] not in reservations:
r = {}
r['reservationId'] = instance['reservation_id']
@ -670,27 +741,35 @@ class CloudController(object):
return {'addressesSet': addresses}
def allocate_address(self, context, **kwargs):
LOG.audit(_("Allocate address"), context=context)
public_ip = self.network_api.allocate_floating_ip(context)
return {'addressSet': [{'publicIp': public_ip}]}
def release_address(self, context, public_ip, **kwargs):
LOG.audit(_("Release address %s"), public_ip, context=context)
self.network_api.release_floating_ip(context, public_ip)
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
LOG.audit(_("Associate address %s to instance %s"), public_ip,
instance_id, context=context)
instance_id = ec2_id_to_id(instance_id)
self.compute_api.associate_floating_ip(context, instance_id, public_ip)
self.compute_api.associate_floating_ip(context,
instance_id=instance_id,
address=public_ip)
return {'associateResponse': ["Address associated."]}
def disassociate_address(self, context, public_ip, **kwargs):
LOG.audit(_("Disassociate address %s"), public_ip, context=context)
self.network_api.disassociate_floating_ip(context, public_ip)
return {'disassociateResponse': ["Address disassociated."]}
def run_instances(self, context, **kwargs):
max_count = int(kwargs.get('max_count', 1))
instances = self.compute_api.create(context,
instance_types.get_by_type(kwargs.get('instance_type', None)),
kwargs['image_id'],
instance_type=instance_types.get_by_type(
kwargs.get('instance_type', None)),
image_id=kwargs['image_id'],
min_count=int(kwargs.get('min_count', max_count)),
max_count=max_count,
kernel_id=kwargs.get('kernel_id', None),
@ -701,37 +780,37 @@ class CloudController(object):
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'AvailabilityZone'),
generate_hostname=id_to_ec2_id)
'AvailabilityZone'))
return self._format_run_instances(context,
instances[0]['reservation_id'])
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified."""
logging.debug("Going to start terminating instances")
LOG.debug(_("Going to start terminating instances"))
for ec2_id in instance_id:
instance_id = ec2_id_to_id(ec2_id)
self.compute_api.delete(context, instance_id)
self.compute_api.delete(context, instance_id=instance_id)
return True
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for ec2_id in instance_id:
instance_id = ec2_id_to_id(ec2_id)
self.compute_api.reboot(context, instance_id)
self.compute_api.reboot(context, instance_id=instance_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
instance_id = ec2_id_to_id(instance_id)
self.compute_api.rescue(context, instance_id)
self.compute_api.rescue(context, instance_id=instance_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
instance_id = ec2_id_to_id(instance_id)
self.compute_api.unrescue(context, instance_id)
self.compute_api.unrescue(context, instance_id=instance_id)
return True
def update_instance(self, context, ec2_id, **kwargs):
@ -742,7 +821,7 @@ class CloudController(object):
changes[field] = kwargs[field]
if changes:
instance_id = ec2_id_to_id(ec2_id)
self.compute_api.update(context, instance_id, **kwargs)
self.compute_api.update(context, instance_id=instance_id, **kwargs)
return True
def describe_images(self, context, image_id=None, **kwargs):
@ -753,6 +832,7 @@ class CloudController(object):
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
LOG.audit(_("De-registering image %s"), image_id, context=context)
self.image_service.deregister(context, image_id)
return {'imageId': image_id}
@ -760,7 +840,8 @@ class CloudController(object):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = self.image_service.register(context, image_location)
logging.debug("Registered %s as %s" % (image_location, image_id))
LOG.audit(_("Registered image %s with id %s"), image_location,
image_id, context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
@ -788,6 +869,7 @@ class CloudController(object):
raise exception.ApiError(_('only group "all" is supported'))
if not operation_type in ['add', 'remove']:
raise exception.ApiError(_('operation_type must be add or remove'))
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
return self.image_service.modify(context, image_id, operation_type)
def update_image(self, context, image_id, **kwargs):

View File

@ -18,19 +18,20 @@
"""Metadata request handler."""
import logging
import webob.dec
import webob.exc
from nova import log as logging
from nova import flags
from nova import wsgi
from nova.api.ec2 import cloud
LOG = logging.getLogger('nova.api.ec2.metadata')
FLAGS = flags.FLAGS
class MetadataRequestHandler(object):
class MetadataRequestHandler(wsgi.Application):
"""Serve metadata from the EC2 API."""
def print_data(self, data):
@ -72,14 +73,9 @@ class MetadataRequestHandler(object):
remote_address = req.headers.get('X-Forwarded-For', remote_address)
meta_data = cc.get_metadata(remote_address)
if meta_data is None:
logging.error(_('Failed to get metadata for ip: %s') %
remote_address)
LOG.error(_('Failed to get metadata for ip: %s'), remote_address)
raise webob.exc.HTTPNotFound()
data = self.lookup(req.path_info, meta_data)
if data is None:
raise webob.exc.HTTPNotFound()
return self.print_data(data)
def metadata_factory(global_args, **local_args):
return MetadataRequestHandler()

View File

@ -20,59 +20,41 @@
WSGI middleware for OpenStack API controllers.
"""
import time
import logging
import routes
import traceback
import webob.dec
import webob.exc
import webob
from nova import context
from nova import flags
from nova import utils
from nova import log as logging
from nova import wsgi
from nova.api.openstack import faults
from nova.api.openstack import backup_schedules
from nova.api.openstack import consoles
from nova.api.openstack import flavors
from nova.api.openstack import images
from nova.api.openstack import ratelimiting
from nova.api.openstack import servers
from nova.api.openstack import sharedipgroups
from nova.api.openstack import shared_ip_groups
LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS
flags.DEFINE_string('os_api_auth',
'nova.api.openstack.auth.AuthMiddleware',
'The auth mechanism to use for the OpenStack API implementation')
flags.DEFINE_string('os_api_ratelimiting',
'nova.api.openstack.ratelimiting.RateLimitingMiddleware',
'Default ratelimiting implementation for the Openstack API')
flags.DEFINE_string('os_krm_mapping_file',
'krm_mapping.json',
'Location of OpenStack Flavor/OS:EC2 Kernel/Ramdisk/Machine JSON file.')
flags.DEFINE_bool('allow_admin_api',
False,
'When True, this API service will accept admin operations.')
class API(wsgi.Middleware):
"""WSGI entry point for all OpenStack API requests."""
def __init__(self):
auth_middleware = utils.import_class(FLAGS.os_api_auth)
ratelimiting_middleware = \
utils.import_class(FLAGS.os_api_ratelimiting)
app = auth_middleware(ratelimiting_middleware(APIRouter()))
super(API, self).__init__(app)
class FaultWrapper(wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
@webob.dec.wsgify
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
logging.warn(_("Caught error: %s") % str(ex))
logging.error(traceback.format_exc())
LOG.exception(_("Caught error: %s"), str(ex))
exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
return faults.Fault(exc)
@ -83,12 +65,17 @@ class APIRouter(wsgi.Router):
and method.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Simple paste factory, :class:`nova.wsgi.Router` doesn't have one"""
return cls()
def __init__(self):
mapper = routes.Mapper()
server_members = {'action': 'POST'}
if FLAGS.allow_admin_api:
logging.debug("Including admin operations in API.")
LOG.debug(_("Including admin operations in API."))
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
server_members["diagnostics"] = "GET"
@ -105,12 +92,18 @@ class APIRouter(wsgi.Router):
parent_resource=dict(member_name='server',
collection_name='servers'))
mapper.resource("console", "consoles",
controller=consoles.Controller(),
parent_resource=dict(member_name='server',
collection_name='servers'))
mapper.resource("image", "images", controller=images.Controller(),
collection={'detail': 'GET'})
mapper.resource("flavor", "flavors", controller=flavors.Controller(),
collection={'detail': 'GET'})
mapper.resource("sharedipgroup", "sharedipgroups",
controller=sharedipgroups.Controller())
mapper.resource("shared_ip_group", "shared_ip_groups",
collection={'detail': 'GET'},
controller=shared_ip_groups.Controller())
super(APIRouter, self).__init__(mapper)
@ -126,11 +119,3 @@ class Versions(wsgi.Application):
"application/xml": {
"attributes": dict(version=["status", "id"])}}
return wsgi.Serializer(req.environ, metadata).to_content_type(response)
def router_factory(global_cof, **local_conf):
return APIRouter()
def versions_factory(global_conf, **local_conf):
return Versions()

View File

@ -134,9 +134,3 @@ class AuthMiddleware(wsgi.Middleware):
token = self.db.auth_create_token(ctxt, token_dict)
return token, user
return None, None
def auth_factory(global_conf, **local_conf):
def auth(app):
return AuthMiddleware(app)
return auth

View File

@ -15,7 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import time
from webob import exc
from nova import wsgi
@ -46,8 +48,8 @@ class Controller(wsgi.Controller):
def create(self, req, server_id):
""" No actual update method required, since the existing API allows
both create and update through a POST """
return faults.Fault(exc.HTTPNotFound())
return faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, server_id, id):
""" Deletes an existing backup schedule """
return faults.Fault(exc.HTTPNotFound())
return faults.Fault(exc.HTTPNotImplemented())

View File

@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
def limited(items, req):
"""Return a slice of items according to requested offset and limit.
@ -34,3 +36,25 @@ def limited(items, req):
limit = min(1000, limit)
range_end = offset + limit
return items[offset:range_end]
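Only the tail of limited() is visible in this hunk; the part shown clamps the page size at 1000 and slices the list. A standalone sketch of that behaviour, with the request parsing replaced by plain offset/limit arguments:
def limited(items, offset=0, limit=1000):
    # Clamp the page size and return the requested slice.
    limit = min(1000, limit)
    range_end = offset + limit
    return items[offset:range_end]

assert limited(list(range(10)), offset=2, limit=3) == [2, 3, 4]
assert len(limited(list(range(5000)))) == 1000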
def get_image_id_from_image_hash(image_service, context, image_hash):
"""Given an Image ID Hash, return an objectstore Image ID.
image_service - reference to objectstore compatible image service.
context - security context for image service requests.
image_hash - hash of the image ID.
"""
# FIX(sandy): This is terribly inefficient. It pulls all images
# from objectstore in order to find the match. ObjectStore
# should have a numeric counterpart to the string ID.
try:
items = image_service.detail(context)
except NotImplementedError:
items = image_service.index(context)
for image in items:
image_id = image['imageId']
if abs(hash(image_id)) == int(image_hash):
return image_id
raise exception.NotFound(image_hash)
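As the FIX(sandy) note says, the lookup is a linear scan: every image is pulled and abs(hash(imageId)) is compared against the requested hash. A self-contained sketch of the same matching, with an in-memory image list standing in for the objectstore service and LookupError standing in for exception.NotFound:
images = [{'imageId': 'ami-00000001'}, {'imageId': 'ami-00000002'}]

def find_image_by_hash(items, image_hash):
    # Scan every image and compare the hashed EC2-style id.
    for image in items:
        image_id = image['imageId']
        if abs(hash(image_id)) == int(image_hash):
            return image_id
    raise LookupError(image_hash)

image_hash = abs(hash('ami-00000002'))
assert find_image_by_hash(images, image_hash) == 'ami-00000002'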

View File

@ -0,0 +1,96 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova import console
from nova import exception
from nova import wsgi
from nova.api.openstack import faults
def _translate_keys(cons):
"""Coerces a console instance into proper dictionary format """
pool = cons['pool']
info = {'id': cons['id'],
'console_type': pool['console_type']}
return dict(console=info)
def _translate_detail_keys(cons):
"""Coerces a console instance into proper dictionary format with
correctly mapped attributes """
pool = cons['pool']
info = {'id': cons['id'],
'console_type': pool['console_type'],
'password': cons['password'],
'port': cons['port'],
'host': pool['public_hostname']}
return dict(console=info)
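Run against the two translators above, a console record of the shape they expect (a made-up example here, not real console API output) maps as follows:
cons = {'id': 1,
        'password': 'secret',
        'port': 5900,
        'pool': {'console_type': 'vnc',
                 'public_hostname': 'console.example.com'}}

assert _translate_keys(cons) == {'console': {'id': 1, 'console_type': 'vnc'}}
detail = _translate_detail_keys(cons)['console']
assert detail['host'] == 'console.example.com'
assert detail['port'] == 5900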
class Controller(wsgi.Controller):
"""The Consoles Controller for the Openstack API"""
_serialization_metadata = {
'application/xml': {
'attributes': {
'console': []}}}
def __init__(self):
self.console_api = console.API()
super(Controller, self).__init__()
def index(self, req, server_id):
"""Returns a list of consoles for this instance"""
consoles = self.console_api.get_consoles(
req.environ['nova.context'],
int(server_id))
return dict(consoles=[_translate_keys(console)
for console in consoles])
def create(self, req, server_id):
"""Creates a new console"""
#info = self._deserialize(req.body, req)
self.console_api.create_console(
req.environ['nova.context'],
int(server_id))
def show(self, req, server_id, id):
"""Shows in-depth information on a specific console"""
try:
console = self.console_api.get_console(
req.environ['nova.context'],
int(server_id),
int(id))
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return _translate_detail_keys(console)
def update(self, req, server_id, id):
"""You can't update a console"""
raise faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, server_id, id):
"""Deletes a console"""
try:
self.console_api.delete_console(req.environ['nova.context'],
int(server_id),
int(id))
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()

View File

@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from webob import exc
from nova import compute
@ -26,6 +28,7 @@ from nova.api.openstack import common
from nova.api.openstack import faults
import nova.image.service
FLAGS = flags.FLAGS
@ -75,7 +78,14 @@ def _translate_status(item):
'decrypting': 'preparing',
'untarring': 'saving',
'available': 'active'}
item['status'] = status_mapping[item['status']]
try:
item['status'] = status_mapping[item['status']]
except KeyError:
# TODO(sirp): Performing translation of status (if necessary) here for
# now. Perhaps this should really be done in EC2 API and
# S3ImageService
pass
return item
@ -88,6 +98,14 @@ def _filter_keys(item, keys):
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _convert_image_id_to_hash(image):
if 'imageId' in image:
# Convert EC2-style ID (i-blah) to Rackspace-style (int)
image_id = abs(hash(image['imageId']))
image['imageId'] = image_id
image['id'] = image_id
class Controller(wsgi.Controller):
_serialization_metadata = {
@ -112,6 +130,9 @@ class Controller(wsgi.Controller):
items = self._service.detail(req.environ['nova.context'])
except NotImplementedError:
items = self._service.index(req.environ['nova.context'])
for image in items:
_convert_image_id_to_hash(image)
items = common.limited(items, req)
items = [_translate_keys(item) for item in items]
items = [_translate_status(item) for item in items]
@ -119,7 +140,12 @@ class Controller(wsgi.Controller):
def show(self, req, id):
"""Return data about the given image id"""
return dict(image=self._service.show(req.environ['nova.context'], id))
image_id = common.get_image_id_from_image_hash(self._service,
req.environ['nova.context'], id)
image = self._service.show(req.environ['nova.context'], image_id)
_convert_image_id_to_hash(image)
return dict(image=image)
def delete(self, req, id):
# Only public images are supported for now.
@ -130,7 +156,11 @@ class Controller(wsgi.Controller):
env = self._deserialize(req.body, req)
instance_id = env["image"]["serverId"]
name = env["image"]["name"]
return compute.API().snapshot(context, instance_id, name)
image_meta = compute.API().snapshot(
context, instance_id, name)
return dict(image=image_meta)
def update(self, req, id):
# Users may not modify public images, and that's all that

View File

@ -219,9 +219,3 @@ class WSGIAppProxy(object):
# No delay
return None
return float(resp.getheader('X-Wait-Seconds'))
def ratelimit_factory(global_conf, **local_conf):
def rl(app):
return RateLimitingMiddleware(app)
return rl

View File

@ -15,14 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import json
import traceback
from webob import exc
from nova import compute
from nova import exception
from nova import flags
from nova import log as logging
from nova import wsgi
from nova import utils
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.auth import manager as auth_manager
@ -35,6 +38,9 @@ LOG = logging.getLogger('server')
LOG.setLevel(logging.DEBUG)
FLAGS = flags.FLAGS
def _translate_detail_keys(inst):
""" Coerces into dictionary format, mapping everything to Rackspace-like
attributes for return"""
@ -44,7 +50,7 @@ def _translate_detail_keys(inst):
power_state.RUNNING: 'active',
power_state.BLOCKED: 'active',
power_state.SUSPENDED: 'suspended',
power_state.PAUSED: 'error',
power_state.PAUSED: 'paused',
power_state.SHUTDOWN: 'active',
power_state.SHUTOFF: 'active',
power_state.CRASHED: 'error'}
@ -81,6 +87,7 @@ class Controller(wsgi.Controller):
def __init__(self):
self.compute_api = compute.API()
self._image_service = utils.import_object(FLAGS.image_service)
super(Controller, self).__init__()
def index(self, req):
@ -117,6 +124,18 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
def _get_kernel_ramdisk_from_image(self, image_id):
mapping_filename = FLAGS.os_krm_mapping_file
with open(mapping_filename) as f:
mapping = json.load(f)
if image_id in mapping:
return mapping[image_id]
raise exception.NotFound(
_("No entry for image '%s' in mapping file '%s'") %
(image_id, mapping_filename))
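The mapping file named by FLAGS.os_krm_mapping_file (default krm_mapping.json, per the flag added in this commit) only needs to map an image id to a two-element kernel/ramdisk pair, since create() unpacks the result directly. A hypothetical example with invented ids:
import json

# Hypothetical krm_mapping.json content; the ids are invented.
mapping = json.loads('{"1": ["7", "9"], "2": ["7", "10"]}')

image_id = '1'
if image_id in mapping:
    kernel_id, ramdisk_id = mapping[image_id]
assert (kernel_id, ramdisk_id) == ('7', '9')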
def create(self, req):
""" Creates a new server for a given user """
env = self._deserialize(req.body, req)
@ -125,10 +144,15 @@ class Controller(wsgi.Controller):
key_pair = auth_manager.AuthManager.get_key_pairs(
req.environ['nova.context'])[0]
image_id = common.get_image_id_from_image_hash(self._image_service,
req.environ['nova.context'], env['server']['imageId'])
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(image_id)
instances = self.compute_api.create(
req.environ['nova.context'],
instance_types.get_by_flavor_id(env['server']['flavorId']),
env['server']['imageId'],
image_id,
kernel_id=kernel_id,
ramdisk_id=ramdisk_id,
display_name=env['server']['name'],
display_description=env['server']['name'],
key_name=key_pair['name'],
@ -141,15 +165,18 @@ class Controller(wsgi.Controller):
if not inst_dict:
return faults.Fault(exc.HTTPUnprocessableEntity())
ctxt = req.environ['nova.context']
update_dict = {}
if 'adminPass' in inst_dict['server']:
update_dict['admin_pass'] = inst_dict['server']['adminPass']
try:
self.compute_api.set_admin_password(ctxt, id)
except exception.TimeoutException, e:
return exc.HTTPRequestTimeout()
if 'name' in inst_dict['server']:
update_dict['display_name'] = inst_dict['server']['name']
try:
self.compute_api.update(req.environ['nova.context'], id,
**update_dict)
self.compute_api.update(ctxt, id, **update_dict)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPNoContent()
@ -158,6 +185,7 @@ class Controller(wsgi.Controller):
""" Multi-purpose method used to reboot, rebuild, and
resize a server """
input_dict = self._deserialize(req.body, req)
#TODO(sandy): rebuild/resize not supported.
try:
reboot_type = input_dict['reboot']['type']
except Exception:
@ -170,6 +198,50 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
def lock(self, req, id):
"""
lock the instance with id
admin only operation
"""
context = req.environ['nova.context']
try:
self.compute_api.lock(context, id)
except:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::lock %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
def unlock(self, req, id):
"""
unlock the instance with id
admin only operation
"""
context = req.environ['nova.context']
try:
self.compute_api.unlock(context, id)
except:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::unlock %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
def get_lock(self, req, id):
"""
return the boolean state of (instance with id)'s lock
"""
context = req.environ['nova.context']
try:
self.compute_api.get_lock(context, id)
except:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::get_lock %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
def pause(self, req, id):
""" Permit Admins to Pause the server. """
ctxt = req.environ['nova.context']
@ -177,7 +249,7 @@ class Controller(wsgi.Controller):
self.compute_api.pause(ctxt, id)
except:
readable = traceback.format_exc()
logging.error(_("Compute.api::pause %s"), readable)
LOG.exception(_("Compute.api::pause %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@ -188,7 +260,7 @@ class Controller(wsgi.Controller):
self.compute_api.unpause(ctxt, id)
except:
readable = traceback.format_exc()
logging.error(_("Compute.api::unpause %s"), readable)
LOG.exception(_("Compute.api::unpause %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@ -199,7 +271,7 @@ class Controller(wsgi.Controller):
self.compute_api.suspend(context, id)
except:
readable = traceback.format_exc()
logging.error(_("compute.api::suspend %s"), readable)
LOG.exception(_("compute.api::suspend %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@ -210,10 +282,19 @@ class Controller(wsgi.Controller):
self.compute_api.resume(context, id)
except:
readable = traceback.format_exc()
logging.error(_("compute.api::resume %s"), readable)
LOG.exception(_("compute.api::resume %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
def get_ajax_console(self, req, id):
""" Returns a url to an instance's ajaxterm console. """
try:
self.compute_api.get_ajax_console(req.environ['nova.context'],
int(id))
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
def diagnostics(self, req, id):
"""Permit Admins to retrieve server diagnostics."""
ctxt = req.environ["nova.context"]
@ -222,4 +303,13 @@ class Controller(wsgi.Controller):
def actions(self, req, id):
"""Permit Admins to retrieve server actions."""
ctxt = req.environ["nova.context"]
return self.compute_api.get_actions(ctxt, id)
items = self.compute_api.get_actions(ctxt, id)
actions = []
# TODO(jk0): Do not do pre-serialization here once the default
# serializer is updated
for item in items:
actions.append(dict(
created_at=str(item.created_at),
action=item.action,
error=item.error))
return dict(actions=actions)

View File

@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from webob import exc
from nova import wsgi
@ -29,7 +31,7 @@ def _translate_keys(inst):
def _translate_detail_keys(inst):
""" Coerces a shared IP group instance into proper dictionary format with
correctly mapped attributes """
return dict(sharedIpGroup=inst)
return dict(sharedIpGroups=inst)
class Controller(wsgi.Controller):
@ -54,12 +56,12 @@ class Controller(wsgi.Controller):
def delete(self, req, id):
""" Deletes a Shared IP Group """
raise faults.Fault(exc.HTTPNotFound())
raise faults.Fault(exc.HTTPNotImplemented())
def detail(self, req, id):
def detail(self, req):
""" Returns a complete list of Shared IP Groups """
return _translate_detail_keys({})
def create(self, req):
""" Creates a new Shared IP group """
raise faults.Fault(exc.HTTPNotFound())
raise faults.Fault(exc.HTTPNotImplemented())

View File

@ -20,7 +20,6 @@
Auth driver using the DB as its backend.
"""
import logging
import sys
from nova import context

View File

@ -24,11 +24,11 @@ other backends by creating another class that exposes the same
public methods.
"""
import logging
import sys
from nova import exception
from nova import flags
from nova import log as logging
FLAGS = flags.FLAGS
@ -65,6 +65,8 @@ flags.DEFINE_string('ldap_netadmin',
flags.DEFINE_string('ldap_developer',
'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
LOG = logging.getLogger("nova.ldapdriver")
# TODO(vish): make an abstract base class with the same public methods
# to define a set interface for AuthDrivers. I'm delaying
@ -117,8 +119,7 @@ class LdapDriver(object):
def get_project(self, pid):
"""Retrieve project by id"""
dn = 'cn=%s,%s' % (pid,
FLAGS.ldap_project_subtree)
dn = self.__project_to_dn(pid)
attr = self.__find_object(dn, LdapDriver.project_pattern)
return self.__to_project(attr)
@ -226,7 +227,8 @@ class LdapDriver(object):
('description', [description]),
(LdapDriver.project_attribute, [manager_dn]),
('member', members)]
self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
dn = self.__project_to_dn(name, search=False)
self.conn.add_s(dn, attr)
return self.__to_project(dict(attr))
def modify_project(self, project_id, manager_uid=None, description=None):
@ -244,23 +246,22 @@ class LdapDriver(object):
manager_dn))
if description:
attr.append((self.ldap.MOD_REPLACE, 'description', description))
self.conn.modify_s('cn=%s,%s' % (project_id,
FLAGS.ldap_project_subtree),
attr)
dn = self.__project_to_dn(project_id)
self.conn.modify_s(dn, attr)
def add_to_project(self, uid, project_id):
"""Add user to project"""
dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
dn = self.__project_to_dn(project_id)
return self.__add_to_group(uid, dn)
def remove_from_project(self, uid, project_id):
"""Remove user from project"""
dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
dn = self.__project_to_dn(project_id)
return self.__remove_from_group(uid, dn)
def is_in_project(self, uid, project_id):
"""Check if user is in project"""
dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
dn = self.__project_to_dn(project_id)
return self.__is_in_group(uid, dn)
def has_role(self, uid, role, project_id=None):
@ -300,7 +301,7 @@ class LdapDriver(object):
roles.append(role)
return roles
else:
project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
project_dn = self.__project_to_dn(project_id)
query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
(LdapDriver.project_pattern, self.__uid_to_dn(uid)))
roles = self.__find_objects(project_dn, query)
@ -333,7 +334,7 @@ class LdapDriver(object):
def delete_project(self, project_id):
"""Delete a project"""
project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
project_dn = self.__project_to_dn(project_id)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
@ -365,9 +366,10 @@ class LdapDriver(object):
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
attr = self.__find_object(self.__uid_to_dn(uid),
'(objectclass=novaUser)')
return attr
dn = FLAGS.ldap_user_subtree
query = ('(&(%s=%s)(objectclass=novaUser))' %
(FLAGS.ldap_user_id_attribute, uid))
return self.__find_object(dn, query)
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
@ -418,15 +420,13 @@ class LdapDriver(object):
query = '(objectclass=groupOfNames)'
return self.__find_object(dn, query) is not None
@staticmethod
def __role_to_dn(role, project_id=None):
def __role_to_dn(self, role, project_id=None):
"""Convert role to corresponding dn"""
if project_id is None:
return FLAGS.__getitem__("ldap_%s" % role).value
else:
return 'cn=%s,cn=%s,%s' % (role,
project_id,
FLAGS.ldap_project_subtree)
project_dn = self.__project_to_dn(project_id)
return 'cn=%s,%s' % (role, project_dn)
def __create_group(self, group_dn, name, uid,
description, member_uids=None):
@ -502,8 +502,8 @@ class LdapDriver(object):
try:
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
logging.debug(_("Attempted to remove the last member of a group. "
"Deleting the group at %s instead."), group_dn)
LOG.debug(_("Attempted to remove the last member of a group. "
"Deleting the group at %s instead."), group_dn)
self.__delete_group(group_dn)
def __remove_from_all(self, uid):
@ -532,6 +532,42 @@ class LdapDriver(object):
for role_dn in self.__find_role_dns(project_dn):
self.__delete_group(role_dn)
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
if attr is None:
return None
member_dns = attr.get('member', [])
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
'project_manager_id':
self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
'description': attr.get('description', [None])[0],
'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
def __uid_to_dn(self, uid, search=True):
"""Convert uid to dn"""
# By default return a generated DN
userdn = (FLAGS.ldap_user_id_attribute + '=%s,%s'
% (uid, FLAGS.ldap_user_subtree))
if search:
query = ('%s=%s' % (FLAGS.ldap_user_id_attribute, uid))
user = self.__find_dns(FLAGS.ldap_user_subtree, query)
if len(user) > 0:
userdn = user[0]
return userdn
def __project_to_dn(self, pid, search=True):
"""Convert pid to dn"""
# By default return a generated DN
projectdn = ('cn=%s,%s' % (pid, FLAGS.ldap_project_subtree))
if search:
query = ('(&(cn=%s)%s)' % (pid, LdapDriver.project_pattern))
project = self.__find_dns(FLAGS.ldap_project_subtree, query)
if len(project) > 0:
projectdn = project[0]
return projectdn
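Both __uid_to_dn and __project_to_dn fall back to a generated DN when the LDAP search returns nothing. With subtree and attribute values in the style of the example.com defaults used elsewhere in this tree (placeholders here, not the actual flag defaults), the generated DNs look like:
ldap_user_id_attribute = 'uid'                        # placeholder
ldap_user_subtree = 'ou=Users,dc=example,dc=com'      # placeholder
ldap_project_subtree = 'ou=Groups,dc=example,dc=com'  # placeholder

userdn = '%s=%s,%s' % (ldap_user_id_attribute, 'novauser', ldap_user_subtree)
projectdn = 'cn=%s,%s' % ('myproject', ldap_project_subtree)

assert userdn == 'uid=novauser,ou=Users,dc=example,dc=com'
assert projectdn == 'cn=myproject,ou=Groups,dc=example,dc=com'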
@staticmethod
def __to_user(attr):
"""Convert ldap attributes to User object"""
@ -548,30 +584,11 @@ class LdapDriver(object):
else:
return None
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
if attr is None:
return None
member_dns = attr.get('member', [])
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
'project_manager_id':
self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
'description': attr.get('description', [None])[0],
'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
@staticmethod
def __dn_to_uid(dn):
"""Convert user dn to uid"""
return dn.split(',')[0].split('=')[1]
@staticmethod
def __uid_to_dn(uid):
"""Convert uid to dn"""
return (FLAGS.ldap_user_id_attribute + '=%s,%s'
% (uid, FLAGS.ldap_user_subtree))
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""

View File

@ -20,7 +20,6 @@
Nova authentication management
"""
import logging
import os
import shutil
import string # pylint: disable-msg=W0402
@ -33,6 +32,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import signer
@ -70,6 +70,8 @@ flags.DEFINE_string('credential_rc_file', '%src',
flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')
LOG = logging.getLogger('nova.auth.manager')
class AuthBase(object):
"""Base class for objects relating to auth
@ -254,43 +256,51 @@ class AuthManager(object):
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
logging.info(_('Looking up user: %r'), access_key)
LOG.debug(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
logging.info('user: %r', user)
LOG.debug('user: %r', user)
if user == None:
LOG.audit(_("Failed authorization for access key %s"), access_key)
raise exception.NotFound(_('No user found for access key %s')
% access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
if project_id == '':
LOG.debug(_("Using project name = user name (%s)"), user.name)
project_id = user.name
project = self.get_project(project_id)
if project == None:
LOG.audit(_("failed authorization: no project named %s (user=%s)"),
project_id, user.name)
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
LOG.audit(_("Failed authorization: user %s not admin and not "
"member of project %s"), user.name, project.name)
raise exception.NotFound(_('User %s is not a member of project %s')
% (user.id, project.id))
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
LOG.debug('user.secret: %s', user.secret)
LOG.debug('expected_signature: %s', expected_signature)
LOG.debug('signature: %s', signature)
if signature != expected_signature:
LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.NotAuthorized(_('Signature does not match'))
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
LOG.debug('user.secret: %s', user.secret)
LOG.debug('expected_signature: %s', expected_signature)
LOG.debug('signature: %s', signature)
if signature != expected_signature:
LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.NotAuthorized(_('Signature does not match'))
return (user, project)
@ -398,6 +408,12 @@ class AuthManager(object):
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound(_("The %s role is global only") % role)
if project:
LOG.audit(_("Adding role %s to user %s in project %s"), role,
User.safe_id(user), Project.safe_id(project))
else:
LOG.audit(_("Adding sitewide role %s to user %s"), role,
User.safe_id(user))
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
@ -418,6 +434,12 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
if project:
LOG.audit(_("Removing role %s from user %s on project %s"),
role, User.safe_id(user), Project.safe_id(project))
else:
LOG.audit(_("Removing sitewide role %s from user %s"), role,
User.safe_id(user))
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
@ -480,6 +502,8 @@ class AuthManager(object):
description,
member_users)
if project_dict:
LOG.audit(_("Created project %s with manager %s"), name,
manager_user)
project = Project(**project_dict)
return project
@ -496,6 +520,7 @@ class AuthManager(object):
@param project: This will be the new description of the project.
"""
LOG.audit(_("modifying project %s"), Project.safe_id(project))
if manager_user:
manager_user = User.safe_id(manager_user)
with self.driver() as drv:
@ -505,6 +530,8 @@ class AuthManager(object):
def add_to_project(self, user, project):
"""Add user to project"""
LOG.audit(_("Adding user %s to project %s"), User.safe_id(user),
Project.safe_id(project))
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
Project.safe_id(project))
@ -523,6 +550,8 @@ class AuthManager(object):
def remove_from_project(self, user, project):
"""Removes a user from a project"""
LOG.audit(_("Remove user %s from project %s"), User.safe_id(user),
Project.safe_id(project))
with self.driver() as drv:
return drv.remove_from_project(User.safe_id(user),
Project.safe_id(project))
@ -549,6 +578,7 @@ class AuthManager(object):
def delete_project(self, project):
"""Deletes a project"""
LOG.audit(_("Deleting project %s"), Project.safe_id(project))
with self.driver() as drv:
drv.delete_project(Project.safe_id(project))
@ -603,13 +633,16 @@ class AuthManager(object):
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
return User(**user_dict)
rv = User(**user_dict)
LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin)
return rv
def delete_user(self, user):
"""Deletes a user
Additionally deletes all users key_pairs"""
uid = User.safe_id(user)
LOG.audit(_("Deleting user %s"), uid)
db.key_pair_destroy_all_by_user(context.get_admin_context(),
uid)
with self.driver() as drv:
@ -618,6 +651,12 @@ class AuthManager(object):
def modify_user(self, user, access_key=None, secret_key=None, admin=None):
"""Modify credentials for a user"""
uid = User.safe_id(user)
if access_key:
LOG.audit(_("Access Key change for user %s"), uid)
if secret_key:
LOG.audit(_("Secret Key change for user %s"), uid)
if admin is not None:
LOG.audit(_("Admin status set to %r for user %s"), admin, uid)
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
@ -643,10 +682,9 @@ class AuthManager(object):
region, _sep, region_host = item.partition("=")
regions[region] = region_host
else:
regions = {'nova': FLAGS.cc_host}
regions = {'nova': FLAGS.ec2_host}
for region, host in regions.iteritems():
rc = self.__generate_rc(user.access,
user.secret,
rc = self.__generate_rc(user,
pid,
use_dmz,
host)
@ -666,7 +704,7 @@ class AuthManager(object):
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
logging.warn(_("No vpn data for project %s"), pid)
LOG.warn(_("No vpn data for project %s"), pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
zippy.close()
@ -683,30 +721,35 @@ class AuthManager(object):
if project is None:
project = user.id
pid = Project.safe_id(project)
return self.__generate_rc(user.access, user.secret, pid, use_dmz)
return self.__generate_rc(user, pid, use_dmz)
@staticmethod
def __generate_rc(access, secret, pid, use_dmz=True, host=None):
def __generate_rc(user, pid, use_dmz=True, host=None):
"""Generate rc file for user"""
if use_dmz:
cc_host = FLAGS.cc_dmz
ec2_host = FLAGS.ec2_dmz_host
else:
cc_host = FLAGS.cc_host
ec2_host = FLAGS.ec2_host
# NOTE(vish): Always use the dmz since it is used from inside the
# instance
s3_host = FLAGS.s3_dmz
if host:
s3_host = host
cc_host = host
ec2_host = host
rc = open(FLAGS.credentials_template).read()
rc = rc % {'access': access,
rc = rc % {'access': user.access,
'project': pid,
'secret': secret,
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
cc_host,
FLAGS.cc_port,
FLAGS.ec2_suffix),
'secret': user.secret,
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
ec2_host,
FLAGS.ec2_port,
FLAGS.ec2_path),
's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port),
'os': '%s://%s:%s%s' % (FLAGS.osapi_scheme,
ec2_host,
FLAGS.osapi_port,
FLAGS.osapi_path),
'user': user.name,
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file}
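The rc file is plain %-substitution of the credentials template; the dictionary above supplies the access/secret/ec2/s3/os/user and certificate keys. A sketch against just the CLOUD_SERVERS_* lines added to the template in this commit, with invented credential values:
template = (
    'export CLOUD_SERVERS_API_KEY="%(access)s"\n'
    'export CLOUD_SERVERS_USERNAME="%(user)s"\n'
    'export CLOUD_SERVERS_URL="%(os)s"\n')

rc = template % {'access': 'deadbeef',                        # invented
                 'user': 'demo',                              # invented
                 'os': 'http://api.example.com:8774/v1.0/'}   # invented

assert 'export CLOUD_SERVERS_USERNAME="demo"' in rc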

View File

@ -10,3 +10,7 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s
export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
export CLOUD_SERVERS_API_KEY="%(access)s"
export CLOUD_SERVERS_USERNAME="%(user)s"
export CLOUD_SERVERS_URL="%(os)s"

View File

@ -46,7 +46,6 @@ Utility class for parsing signed AMI manifests.
import base64
import hashlib
import hmac
import logging
import urllib
# NOTE(vish): for new boto
@ -54,9 +53,13 @@ import boto
# NOTE(vish): for old boto
import boto.utils
from nova import log as logging
from nova.exception import Error
LOG = logging.getLogger('nova.signer')
class Signer(object):
"""Hacked up code from boto/connection.py"""
@ -120,7 +123,7 @@ class Signer(object):
def _calc_signature_2(self, params, verb, server_string, path):
"""Generate AWS signature version 2 string."""
logging.debug('using _calc_signature_2')
LOG.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
if self.hmac_256:
current_hmac = self.hmac_256
@ -136,13 +139,13 @@ class Signer(object):
val = urllib.quote(val, safe='-_~')
pairs.append(urllib.quote(key, safe='') + '=' + val)
qs = '&'.join(pairs)
logging.debug('query string: %s', qs)
LOG.debug('query string: %s', qs)
string_to_sign += qs
logging.debug('string_to_sign: %s', string_to_sign)
LOG.debug('string_to_sign: %s', string_to_sign)
current_hmac.update(string_to_sign)
b64 = base64.b64encode(current_hmac.digest())
logging.debug('len(b64)=%d', len(b64))
logging.debug('base64 encoded digest: %s', b64)
LOG.debug('len(b64)=%d', len(b64))
LOG.debug('base64 encoded digest: %s', b64)
return b64
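End to end, _calc_signature_2 percent-encodes the query parameters, appends them to the verb/server/path preamble and base64-encodes the HMAC digest. A rough sketch of that flow; the byte-order parameter sort and the example keys and endpoint are assumptions, since only the encoding and HMAC steps are visible in this hunk:
import base64
import hashlib
import hmac
try:
    from urllib import quote           # Python 2, as in this module
except ImportError:
    from urllib.parse import quote     # Python 3 fallback

params = {'Action': 'DescribeInstances',
          'AWSAccessKeyId': 'deadbeef',          # invented key
          'SignatureMethod': 'HmacSHA256',
          'SignatureVersion': '2'}
verb, server_string, path = 'GET', 'api.example.com:8773', '/services/Cloud'

pairs = []
for key in sorted(params):                       # assumed byte-order sort
    val = quote(str(params[key]), safe='-_~')
    pairs.append(quote(key, safe='') + '=' + val)
qs = '&'.join(pairs)

string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path) + qs
mac = hmac.new('secret-key'.encode(), digestmod=hashlib.sha256)
mac.update(string_to_sign.encode())
signature = base64.b64encode(mac.digest())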

View File

@ -22,7 +22,6 @@ an instance with it.
"""
import logging
import os
import string
import tempfile
@ -33,6 +32,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import manager
# TODO(eday): Eventually changes these to something not ec2-specific
@ -51,7 +51,7 @@ flags.DEFINE_string('dmz_mask',
_('Netmask to push into openvpn config'))
LOG = logging.getLogger('nova-cloudpipe')
LOG = logging.getLogger('nova.cloudpipe')
class CloudPipe(object):
@ -68,8 +68,8 @@ class CloudPipe(object):
shellfile = open(FLAGS.boot_script_template, "r")
s = string.Template(shellfile.read())
shellfile.close()
boot_script = s.substitute(cc_dmz=FLAGS.cc_dmz,
cc_port=FLAGS.cc_port,
boot_script = s.substitute(cc_dmz=FLAGS.ec2_dmz_host,
cc_port=FLAGS.ec2_port,
dmz_net=FLAGS.dmz_net,
dmz_mask=FLAGS.dmz_mask,
num_vpn=FLAGS.cnt_vpn_clients)

View File

@ -21,12 +21,13 @@ Handles all requests relating to instances (guest vms).
"""
import datetime
import logging
import re
import time
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import network
from nova import quota
from nova import rpc
@ -36,6 +37,7 @@ from nova.compute import instance_types
from nova.db import base
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.compute.api')
def generate_default_hostname(instance_id):
@ -46,7 +48,8 @@ def generate_default_hostname(instance_id):
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None, volume_api=None,
def __init__(self, image_service=None, network_api=None,
volume_api=None, hostname_factory=generate_default_hostname,
**kwargs):
if not image_service:
image_service = utils.import_object(FLAGS.image_service)
@ -57,19 +60,21 @@ class API(base.Base):
if not volume_api:
volume_api = volume.API()
self.volume_api = volume_api
self.hostname_factory = hostname_factory
super(API, self).__init__(**kwargs)
def get_network_topic(self, context, instance_id):
"""Get the network topic for an instance."""
try:
instance = self.get(context, instance_id)
except exception.NotFound as e:
logging.warning("Instance %d was not found in get_network_topic",
instance_id)
LOG.warning(_("Instance %d was not found in get_network_topic"),
instance_id)
raise e
host = instance['host']
if not host:
raise exception.Error("Instance %d has no host" % instance_id)
raise exception.Error(_("Instance %d has no host") % instance_id)
topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
return rpc.call(context,
topic,
@ -80,18 +85,17 @@ class API(base.Base):
min_count=1, max_count=1,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None,
generate_hostname=generate_default_hostname):
availability_zone=None, user_data=None):
"""Create the number of instances requested if quota and
other arguments check out ok."""
type_data = instance_types.INSTANCE_TYPES[instance_type]
num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
logging.warn("Quota exceeded for %s, tried to run %s instances",
context.project_id, min_count)
raise quota.QuotaError("Instance quota exceeded. You can only "
"run %s more instances of this type." %
LOG.warn(_("Quota exceeded for %s, tried to run %s instances"),
context.project_id, min_count)
raise quota.QuotaError(_("Instance quota exceeded. You can only "
"run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
is_vpn = image_id == FLAGS.vpn_image_id
@ -105,8 +109,10 @@ class API(base.Base):
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None
ramdisk_id = None
logging.debug("Creating a raw instance")
LOG.debug(_("Creating a raw instance"))
# Make sure we have access to kernel and ramdisk (if not raw)
logging.debug("Using Kernel=%s, Ramdisk=%s" %
(kernel_id, ramdisk_id))
if kernel_id:
self.image_service.show(context, kernel_id)
if ramdisk_id:
@ -147,11 +153,12 @@ class API(base.Base):
'user_data': user_data or '',
'key_name': key_name,
'key_data': key_data,
'locked': False,
'availability_zone': availability_zone}
elevated = context.elevated()
instances = []
logging.debug(_("Going to run %s instances..."), num_instances)
LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
@ -168,22 +175,27 @@ class API(base.Base):
security_group_id)
# Set sane defaults if not specified
updates = dict(hostname=generate_hostname(instance_id))
if 'display_name' not in instance:
updates = dict(hostname=self.hostname_factory(instance_id))
if (not hasattr(instance, 'display_name') or
instance.display_name == None):
updates['display_name'] = "Server %s" % instance_id
instance = self.update(context, instance_id, **updates)
instances.append(instance)
logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
LOG.debug(_("Casting to scheduler for %s/%s's instance %s"),
context.project_id, context.user_id, instance_id)
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "run_instance",
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id}})
"instance_id": instance_id,
"availability_zone": availability_zone}})
return instances
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
return [dict(x.iteritems()) for x in instances]
def ensure_default_security_group(self, context):
""" Create security group for the security context if it
@ -202,6 +214,60 @@ class API(base.Base):
'project_id': context.project_id}
db.security_group_create(context, values)
def trigger_security_group_rules_refresh(self, context, security_group_id):
"""Called when a rule is added to or removed from a security_group"""
security_group = self.db.security_group_get(context, security_group_id)
hosts = set()
for instance in security_group['instances']:
if instance['host'] is not None:
hosts.add(instance['host'])
for host in hosts:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "refresh_security_group_rules",
"args": {"security_group_id": security_group.id}})
def trigger_security_group_members_refresh(self, context, group_id):
"""Called when a security group gains a new or loses a member
Sends an update request to each compute node for whom this is
relevant."""
# First, we get the security group rules that reference this group as
# the grantee..
security_group_rules = \
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id)
# ..then we distill the security groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_groups.add(rule['parent_group_id'])
# ..then we find the instances that are members of these groups..
instances = set()
for security_group in security_groups:
for instance in security_group['instances']:
instances.add(instance['id'])
# ...then we find the hosts where they live...
hosts = set()
for instance in instances:
if instance['host']:
hosts.add(instance['host'])
# ...and finally we tell these nodes to refresh their view of this
# particular security group.
for host in hosts:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "refresh_security_group_members",
"args": {"security_group_id": group_id}})
def update(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
@ -214,20 +280,21 @@ class API(base.Base):
:retval None
"""
return self.db.instance_update(context, instance_id, kwargs)
rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems())
def delete(self, context, instance_id):
logging.debug("Going to try and terminate %s" % instance_id)
LOG.debug(_("Going to try to terminate %s"), instance_id)
try:
instance = self.get(context, instance_id)
except exception.NotFound as e:
logging.warning(_("Instance %s was not found during terminate"),
instance_id)
LOG.warning(_("Instance %d was not found during terminate"),
instance_id)
raise e
if (instance['state_description'] == 'terminating'):
logging.warning(_("Instance %s is already being terminated"),
instance_id)
LOG.warning(_("Instance %d is already being terminated"),
instance_id)
return
self.update(context,
@ -238,16 +305,15 @@ class API(base.Base):
host = instance['host']
if host:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "terminate_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('terminate_instance', context,
instance_id, host)
else:
self.db.instance_destroy(context, instance_id)
def get(self, context, instance_id):
"""Get a single instance with the given ID."""
return self.db.instance_get_by_id(context, instance_id)
rv = self.db.instance_get_by_id(context, instance_id)
return dict(rv.iteritems())
def get_all(self, context, project_id=None, reservation_id=None,
fixed_ip=None):
@ -256,7 +322,7 @@ class API(base.Base):
an admin, it will retrieve all instances in the system."""
if reservation_id is not None:
return self.db.instance_get_all_by_reservation(context,
reservation_id)
reservation_id)
if fixed_ip is not None:
return self.db.fixed_ip_get_instance(context, fixed_ip)
if project_id or not context.is_admin:
@ -269,50 +335,74 @@ class API(base.Base):
project_id)
return self.db.instance_get_all(context)
def _cast_compute_message(self, method, context, instance_id, host=None,
params=None):
"""Generic handler for RPC casts to compute.
:param params: Optional dictionary of arguments to be passed to the
compute worker
:retval None
"""
if not params:
params = {}
if not host:
instance = self.get(context, instance_id)
host = instance['host']
queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
params['instance_id'] = instance_id
kwargs = {'method': method, 'args': params}
rpc.cast(context, queue, kwargs)
def _call_compute_message(self, method, context, instance_id, host=None,
params=None):
"""Generic handler for RPC calls to compute.
:param params: Optional dictionary of arguments to be passed to the
compute worker
:retval: Result returned by compute worker
"""
if not params:
params = {}
if not host:
instance = self.get(context, instance_id)
host = instance["host"]
queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
params['instance_id'] = instance_id
kwargs = {'method': method, 'args': params}
return rpc.call(context, queue, kwargs)
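Both helpers share one shape: resolve the instance's host when it is not supplied, derive the per-host compute queue, fold instance_id into the argument dict, then either cast (fire-and-forget) or call (wait for a reply). A self-contained sketch of that pattern with a stub rpc object, purely to illustrate why the methods below collapse to one-liners; nothing here is a real nova class:

class StubRPC(object):
    """Records casts and answers calls, standing in for nova.rpc."""
    def __init__(self):
        self.casts = []

    def cast(self, context, topic, msg):
        self.casts.append((topic, msg))       # fire-and-forget

    def call(self, context, topic, msg):
        return {'topic': topic, 'method': msg['method']}    # canned reply


class MiniComputeAPI(object):
    """Toy version of the generic cast/call helpers above."""
    def __init__(self, rpc, hosts):
        self.rpc = rpc
        self.hosts = hosts                    # instance_id -> host

    def _queue_for(self, instance_id, host=None):
        host = host or self.hosts[instance_id]
        return 'compute.%s' % host            # e.g. compute.node1

    def _cast(self, method, context, instance_id, host=None, params=None):
        params = dict(params or {}, instance_id=instance_id)
        self.rpc.cast(context, self._queue_for(instance_id, host),
                      {'method': method, 'args': params})

    def _call(self, method, context, instance_id, host=None, params=None):
        params = dict(params or {}, instance_id=instance_id)
        return self.rpc.call(context, self._queue_for(instance_id, host),
                             {'method': method, 'args': params})

    # with the helpers in place, public methods collapse to one-liners
    def reboot(self, context, instance_id):
        self._cast('reboot_instance', context, instance_id)


rpc = StubRPC()
api = MiniComputeAPI(rpc, {42: 'node1'})
api.reboot(None, 42)
print(rpc.casts[0])
# ('compute.node1', {'method': 'reboot_instance', 'args': {'instance_id': 42}})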
def snapshot(self, context, instance_id, name):
"""Snapshot the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "snapshot_instance",
"args": {"instance_id": instance_id, "name": name}})
"""Snapshot the given instance.
:retval: A dict containing image metadata
"""
data = {'name': name, 'is_public': False}
image_meta = self.image_service.create(context, data)
params = {'image_id': image_meta['id']}
self._cast_compute_message('snapshot_instance', context, instance_id,
params=params)
return image_meta
def reboot(self, context, instance_id):
"""Reboot the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('reboot_instance', context, instance_id)
def pause(self, context, instance_id):
"""Pause the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "pause_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('pause_instance', context, instance_id)
def unpause(self, context, instance_id):
"""Unpause the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unpause_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('unpause_instance', context, instance_id)
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
instance = self.get(context, instance_id)
host = instance["host"]
return rpc.call(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "get_diagnostics",
"args": {"instance_id": instance_id}})
return self._call_compute_message(
"get_diagnostics",
context,
instance_id)
def get_actions(self, context, instance_id):
"""Retrieve actions for the given instance."""
@ -320,39 +410,55 @@ class API(base.Base):
def suspend(self, context, instance_id):
"""suspend the instance with instance_id"""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "suspend_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('suspend_instance', context, instance_id)
def resume(self, context, instance_id):
"""resume the instance with instance_id"""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "resume_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('resume_instance', context, instance_id)
def rescue(self, context, instance_id):
"""Rescue the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "rescue_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('rescue_instance', context, instance_id)
def unrescue(self, context, instance_id):
"""Unrescue the given instance."""
self._cast_compute_message('unrescue_instance', context, instance_id)
def set_admin_password(self, context, instance_id):
"""Set the root/admin password for the given instance."""
self._cast_compute_message('set_admin_password', context, instance_id)
def get_ajax_console(self, context, instance_id):
"""Get a url to an AJAX Console"""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unrescue_instance",
"args": {"instance_id": instance_id}})
output = self._call_compute_message('get_ajax_console',
context,
instance_id)
rpc.cast(context, '%s' % FLAGS.ajax_console_proxy_topic,
{'method': 'authorize_ajax_console',
'args': {'token': output['token'], 'host': output['host'],
'port': output['port']}})
return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url,
output['token'])}
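So the AJAX console path is a call followed by a cast: the compute worker hands back a token/host/port triple, the proxy is asynchronously told to honour that token, and the caller only ever sees the proxy URL plus token. A compact sketch with invented stand-ins for the RPC primitives and flags:

def get_ajax_console_url(call, cast, instance_id, proxy_topic, proxy_url):
    output = call({'method': 'get_ajax_console',
                   'args': {'instance_id': instance_id}})
    cast(proxy_topic, {'method': 'authorize_ajax_console',
                       'args': {'token': output['token'],
                                'host': output['host'],
                                'port': output['port']}})
    return {'url': '%s?token=%s' % (proxy_url, output['token'])}


def fake_call(msg):                      # pretend the compute worker answered
    return {'token': 'tok123', 'host': '10.0.0.5', 'port': 8000}


casts = []


def fake_cast(topic, msg):               # pretend the proxy was notified
    casts.append((topic, msg))


print(get_ajax_console_url(fake_call, fake_cast, 42, 'ajax_proxy',
                           'http://proxy.example.com/console'))
# {'url': 'http://proxy.example.com/console?token=tok123'}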
def get_console_output(self, context, instance_id):
"""Get console output for an an instance"""
return self._call_compute_message('get_console_output',
context,
instance_id)
def lock(self, context, instance_id):
"""lock the instance with instance_id"""
self._cast_compute_message('lock_instance', context, instance_id)
def unlock(self, context, instance_id):
"""unlock the instance with instance_id"""
self._cast_compute_message('unlock_instance', context, instance_id)
def get_lock(self, context, instance_id):
"""return the boolean state of (instance with instance_id)'s lock"""
instance = self.get(context, instance_id)
return instance['locked']
def attach_volume(self, context, instance_id, volume_id, device):
if not re.match("^/dev/[a-z]d[a-z]+$", device):

@ -1,204 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
Includes injection of SSH PGP keys into authorized_keys file.
"""
import logging
import os
import tempfile
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
'minimum size in bytes of root partition')
flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
'block_size to use for dd')
def partition(infile, outfile, local_bytes=0, resize=True,
local_type='ext2', execute=None):
"""
Turns a partition (infile) into a bootable drive image (outfile).
The first 63 sectors (0-62) of the resulting image is a master boot record.
Infile becomes the first primary partition.
If local bytes is specified, a second primary partition is created and
formatted as ext2.
::
In the diagram below, dashes represent drive sectors.
+-----+------. . .-------+------. . .------+
| 0 a| b c|d e|
+-----+------. . .-------+------. . .------+
| mbr | primary partition | local partition |
+-----+------. . .-------+------. . .------+
"""
sector_size = 512
file_size = os.path.getsize(infile)
if resize and file_size < FLAGS.minimum_root_size:
last_sector = FLAGS.minimum_root_size / sector_size - 1
execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
% (infile, last_sector, sector_size))
execute('e2fsck -fp %s' % infile, check_exit_code=False)
execute('resize2fs %s' % infile)
file_size = FLAGS.minimum_root_size
elif file_size % sector_size != 0:
logging.warn(_("Input partition size not evenly divisible by"
" sector size: %d / %d"), file_size, sector_size)
primary_sectors = file_size / sector_size
if local_bytes % sector_size != 0:
logging.warn(_("Bytes for local storage not evenly divisible"
" by sector size: %d / %d"), local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
primary_first = mbr_last + 1 # b
primary_last = primary_first + primary_sectors - 1 # c
local_first = primary_last + 1 # d
local_last = local_first + local_sectors - 1 # e
last_sector = local_last # e
# create an empty file
execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
% (outfile, mbr_last, sector_size))
# make mbr partition
execute('parted --script %s mklabel msdos' % outfile)
# append primary file
execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append'
% (infile, outfile, FLAGS.block_size))
# make primary partition
execute('parted --script %s mkpart primary %ds %ds'
% (outfile, primary_first, primary_last))
if local_bytes > 0:
# make the file bigger
execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
% (outfile, last_sector, sector_size))
# make and format local partition
execute('parted --script %s mkpartfs primary %s %ds %ds'
% (outfile, local_type, local_first, local_last))
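The a-e labels in the docstring map directly onto sector arithmetic. A worked example, assuming the default 512-byte sectors, a root image already grown to the 10 GiB minimum_root_size, and 1 GiB of local space (numbers chosen purely for illustration):

sector_size = 512
file_size = 10 * 1024 ** 3            # root image, already at minimum_root_size
local_bytes = 1 * 1024 ** 3           # requested ext2 scratch space

primary_sectors = file_size // sector_size      # 20971520
local_sectors = local_bytes // sector_size      #  2097152

mbr_last = 62                                   # a: sectors 0-62 hold the MBR
primary_first = mbr_last + 1                    # b: 63
primary_last = primary_first + primary_sectors - 1    # c: 20971582
local_first = primary_last + 1                  # d: 20971583
local_last = local_first + local_sectors - 1    # e: 23068734

print("b=%d c=%d d=%d e=%d" % (primary_first, primary_last,
                               local_first, local_last))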
def extend(image, size, execute):
file_size = os.path.getsize(image)
if file_size >= size:
return
return execute('truncate -s %s %s' % (size, image))
def inject_data(image, key=None, net=None, partition=None, execute=None):
"""Injects a ssh key and optionally net data into a disk image.
it will mount the image as a fully partitioned disk and attempt to inject
into the specified partition number.
If partition is not specified it mounts the image as a single partition.
"""
out, err = execute('sudo losetup --find --show %s' % image)
if err:
raise exception.Error(_('Could not attach image to loopback: %s')
% err)
device = out.strip()
try:
if not partition is None:
# create partition
out, err = execute('sudo kpartx -a %s' % device)
if err:
raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
partition)
else:
mapped_device = device
# We can only loopback mount raw images. If the device isn't there,
# it's normally because it's a .vmdk or a .vdi etc
if not os.path.exists(mapped_device):
raise exception.Error('Mapped device was not found (we can'
' only inject raw disk images): %s' %
mapped_device)
# Configure ext2fs so that it doesn't auto-check every N boots
out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
out, err = execute(
'sudo mount %s %s' % (mapped_device, tmpdir))
if err:
raise exception.Error(_('Failed to mount filesystem: %s')
% err)
try:
if key:
# inject key file
_inject_key_into_fs(key, tmpdir, execute=execute)
if net:
_inject_net_into_fs(net, tmpdir, execute=execute)
finally:
# unmount device
execute('sudo umount %s' % mapped_device)
finally:
# remove temporary directory
execute('rmdir %s' % tmpdir)
if not partition is None:
# remove partitions
execute('sudo kpartx -d %s' % device)
finally:
# remove loopback
execute('sudo losetup --detach %s' % device)
def _inject_key_into_fs(key, fs, execute=None):
"""Add the given public ssh key to root's authorized_keys.
key is an ssh key string.
fs is the path to the base of the filesystem into which to inject the key.
"""
sshdir = os.path.join(fs, 'root', '.ssh')
execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
execute('sudo chown root %s' % sshdir)
execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
def _inject_net_into_fs(net, fs, execute=None):
"""Inject /etc/network/interfaces into the filesystem rooted at fs.
net is the contents of /etc/network/interfaces.
"""
netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
execute('sudo chown root:root %s' % netdir)
execute('sudo chmod 755 %s' % netdir)
netfile = os.path.join(netdir, 'interfaces')
execute('sudo tee %s' % netfile, net)

@ -35,10 +35,15 @@ terminating it.
"""
import datetime
import random
import string
import logging
import socket
import functools
from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
@ -51,6 +56,47 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
'Driver to use for controlling virtualization')
flags.DEFINE_string('stub_network', False,
'Stub network related code')
flags.DEFINE_integer('password_length', 12,
'Length of generated admin passwords')
flags.DEFINE_string('console_host', socket.gethostname(),
'Console proxy host to use to connect to instances on'
'this host.')
LOG = logging.getLogger('nova.compute.manager')
def checks_instance_lock(function):
"""
decorator used for preventing action against locked instances
unless, of course, you happen to be admin
"""
@functools.wraps(function)
def decorated_function(self, context, instance_id, *args, **kwargs):
LOG.info(_("check_instance_lock: decorating: |%s|"), function,
context=context)
LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
self, context, instance_id, context=context)
locked = self.get_lock(context, instance_id)
admin = context.is_admin
LOG.info(_("check_instance_lock: locked: |%s|"), locked,
context=context)
LOG.info(_("check_instance_lock: admin: |%s|"), admin,
context=context)
# if admin or unlocked call function otherwise log error
if admin or not locked:
LOG.info(_("check_instance_lock: executing: |%s|"), function,
context=context)
function(self, context, instance_id, *args, **kwargs)
else:
LOG.error(_("check_instance_lock: not executing |%s|"),
function, context=context)
return False
return decorated_function
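The decorator's effect is easiest to see in isolation: a non-admin call against a locked instance is swallowed and returns False, anything else goes through. A stripped-down sketch with fake manager and context classes, same shape as the decorator above minus the logging (and, unlike the original, returning the wrapped function's result):

import functools


def checks_instance_lock(function):
    """Skip the wrapped action when the instance is locked and the caller
    is not an admin."""
    @functools.wraps(function)
    def decorated(self, context, instance_id, *args, **kwargs):
        if context.is_admin or not self.get_lock(context, instance_id):
            return function(self, context, instance_id, *args, **kwargs)
        return False
    return decorated


class FakeContext(object):
    def __init__(self, is_admin):
        self.is_admin = is_admin


class FakeManager(object):
    def __init__(self, locked):
        self.locked = locked

    def get_lock(self, context, instance_id):
        return self.locked

    @checks_instance_lock
    def reboot_instance(self, context, instance_id):
        return 'rebooted %s' % instance_id


manager = FakeManager(locked=True)
print(manager.reboot_instance(FakeContext(is_admin=False), 1))  # False -- blocked
print(manager.reboot_instance(FakeContext(is_admin=True), 1))   # rebooted 1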
class ComputeManager(manager.Manager):
@ -85,6 +131,15 @@ class ComputeManager(manager.Manager):
state = power_state.NOSTATE
self.db.instance_set_state(context, instance_id, state)
def get_console_topic(self, context, **_kwargs):
"""Retrieves the console host for a project on this host
Currently this is just set in the flags for each compute
host."""
#TODO(mdragon): perhaps make this variable by console_type?
return self.db.queue_get_for(context,
FLAGS.console_topic,
FLAGS.console_host)
def get_network_topic(self, context, **_kwargs):
"""Retrieves the network host for a project on this host"""
# TODO(vish): This method should be memoized. This will make
@ -99,10 +154,20 @@ class ComputeManager(manager.Manager):
FLAGS.network_topic,
host)
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@exception.wrap_exception
def refresh_security_group(self, context, security_group_id, **_kwargs):
"""This call passes stright through to the virtualization driver."""
self.driver.refresh_security_group(security_group_id)
def refresh_security_group_rules(self, context,
security_group_id, **_kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_security_group_rules(security_group_id)
@exception.wrap_exception
def refresh_security_group_members(self, context,
security_group_id, **_kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs):
@ -111,7 +176,8 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
logging.debug(_("instance %s: starting..."), instance_id)
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
self.db.instance_update(context,
instance_id,
{'host': self.host})
@ -149,8 +215,8 @@ class ComputeManager(manager.Manager):
instance_id,
{'launched_at': now})
except Exception: # pylint: disable-msg=W0702
logging.exception(_("instance %s: Failed to spawn"),
instance_ref['name'])
LOG.exception(_("instance %s: Failed to spawn"), instance_id,
context=context)
self.db.instance_set_state(context,
instance_id,
power_state.SHUTDOWN)
@ -158,17 +224,19 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
@exception.wrap_exception
@checks_instance_lock
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Terminating instance %s"), instance_id, context=context)
if not FLAGS.stub_network:
address = self.db.instance_get_floating_address(context,
instance_ref['id'])
if address:
logging.debug(_("Disassociating address %s") % address)
LOG.debug(_("Disassociating address %s"), address,
context=context)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later.
@ -180,15 +248,14 @@ class ComputeManager(manager.Manager):
address = self.db.instance_get_fixed_address(context,
instance_ref['id'])
if address:
logging.debug(_("Deallocating address %s") % address)
LOG.debug(_("Deallocating address %s"), address,
context=context)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)
logging.debug(_("instance %s: terminating"), instance_id)
volumes = instance_ref.get('volumes', []) or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
@ -202,20 +269,22 @@ class ComputeManager(manager.Manager):
self.db.instance_destroy(context, instance_id)
@exception.wrap_exception
@checks_instance_lock
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this server."""
context = context.elevated()
self._update_state(context, instance_id)
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
if instance_ref['state'] != power_state.RUNNING:
logging.warn(_('trying to reboot a non-running '
'instance: %s (state: %s excepted: %s)'),
instance_id,
instance_ref['state'],
power_state.RUNNING)
LOG.warn(_('trying to reboot a non-running '
'instance: %s (state: %s expected: %s)'),
instance_id,
instance_ref['state'],
power_state.RUNNING,
context=context)
logging.debug(_('instance %s: rebooting'), instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -225,7 +294,7 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
@exception.wrap_exception
def snapshot_instance(self, context, instance_id, name):
def snapshot_instance(self, context, instance_id, image_id):
"""Snapshot an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@ -235,23 +304,51 @@ class ComputeManager(manager.Manager):
# potentially?
self._update_state(context, instance_id)
logging.debug(_('instance %s: snapshotting'), instance_ref['name'])
LOG.audit(_('instance %s: snapshotting'), instance_id,
context=context)
if instance_ref['state'] != power_state.RUNNING:
logging.warn(_('trying to snapshot a non-running '
'instance: %s (state: %s excepted: %s)'),
instance_id,
instance_ref['state'],
power_state.RUNNING)
LOG.warn(_('trying to snapshot a non-running '
'instance: %s (state: %s expected: %s)'),
instance_id, instance_ref['state'], power_state.RUNNING)
self.driver.snapshot(instance_ref, name)
self.driver.snapshot(instance_ref, image_id)
@exception.wrap_exception
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
"""Set the root/admin password for an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['state'] != power_state.RUNNING:
logging.warn('trying to reset the password on a non-running '
'instance: %s (state: %s expected: %s)',
instance_ref['id'],
instance_ref['state'],
power_state.RUNNING)
logging.debug('instance %s: setting admin password',
instance_ref['name'])
if new_pass is None:
# Generate a random password
new_pass = self._generate_password(FLAGS.password_length)
self.driver.set_admin_password(instance_ref, new_pass)
self._update_state(context, instance_id)
def _generate_password(self, length=20):
"""Generate a random sequence of letters and digits
to be used as a password.
"""
chrs = string.letters + string.digits
return "".join([random.choice(chrs) for i in xrange(length)])
@exception.wrap_exception
@checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
logging.debug(_('instance %s: rescuing'), instance_id)
LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -261,12 +358,12 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
@exception.wrap_exception
@checks_instance_lock
def unrescue_instance(self, context, instance_id):
"""Rescue an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
logging.debug(_('instance %s: unrescuing'), instance_id)
LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -280,12 +377,12 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
@exception.wrap_exception
@checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
logging.debug('instance %s: pausing', instance_id)
LOG.audit(_('instance %s: pausing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -297,12 +394,12 @@ class ComputeManager(manager.Manager):
result))
@exception.wrap_exception
@checks_instance_lock
def unpause_instance(self, context, instance_id):
"""Unpause a paused instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
logging.debug('instance %s: unpausing', instance_id)
LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -319,17 +416,20 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref["state"] == power_state.RUNNING:
logging.debug(_("instance %s: retrieving diagnostics"),
instance_id)
LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
context=context)
return self.driver.get_diagnostics(instance_ref)
@exception.wrap_exception
@checks_instance_lock
def suspend_instance(self, context, instance_id):
"""suspend the instance with instance_id"""
"""
suspend the instance with instance_id
"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
logging.debug(_('instance %s: suspending'), instance_id)
LOG.audit(_('instance %s: suspending'), instance_id, context=context)
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'suspending')
@ -340,12 +440,15 @@ class ComputeManager(manager.Manager):
result))
@exception.wrap_exception
@checks_instance_lock
def resume_instance(self, context, instance_id):
"""resume the suspended instance with instance_id"""
"""
resume the suspended instance with instance_id
"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
logging.debug(_('instance %s: resuming'), instance_id)
LOG.audit(_('instance %s: resuming'), instance_id, context=context)
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'resuming')
@ -355,22 +458,67 @@ class ComputeManager(manager.Manager):
instance_id,
result))
@exception.wrap_exception
def lock_instance(self, context, instance_id):
"""
lock the instance with instance_id
"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: locking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': True})
@exception.wrap_exception
def unlock_instance(self, context, instance_id):
"""
unlock the instance with instance_id
"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': False})
@exception.wrap_exception
def get_lock(self, context, instance_id):
"""
return the boolean state of (instance with instance_id)'s lock
"""
context = context.elevated()
LOG.debug(_('instance %s: getting locked state'), instance_id,
context=context)
instance_ref = self.db.instance_get(context, instance_id)
return instance_ref['locked']
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
logging.debug(_("instance %s: getting console output"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
return self.driver.get_console_output(instance_ref)
@exception.wrap_exception
def get_ajax_console(self, context, instance_id):
"""Return connection information for an ajax console"""
context = context.elevated()
logging.debug(_("instance %s: getting ajax console"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_ajax_console(instance_ref)
@checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
volume_id, mountpoint)
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
volume_id, mountpoint, context=context)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
try:
@ -385,8 +533,8 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
logging.exception(_("instance %s: attach failed %s, removing"),
instance_id, mountpoint)
LOG.exception(_("instance %s: attach failed %s, removing"),
instance_id, mountpoint, context=context)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
@ -394,17 +542,18 @@ class ComputeManager(manager.Manager):
return True
@exception.wrap_exception
@checks_instance_lock
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
context = context.elevated()
logging.debug(_("instance %s: detaching volume %s"),
instance_id,
volume_id)
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
volume_id, volume_ref['mountpoint'], instance_id,
context=context)
if instance_ref['name'] not in self.driver.list_instances():
logging.warn(_("Detaching volume from unknown instance %s"),
instance_ref['name'])
LOG.warn(_("Detaching volume from unknown instance %s"),
instance_id, context=context)
else:
self.driver.detach_volume(instance_ref['name'],
volume_ref['mountpoint'])

@ -25,19 +25,17 @@ Instance Monitoring:
"""
import datetime
import logging
import os
import sys
import time
import boto
import boto.s3
import rrdtool
from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
from nova import flags
from nova import log as logging
from nova.virt import connection as virt_connection
@ -91,6 +89,9 @@ RRD_VALUES = {
utcnow = datetime.datetime.utcnow
LOG = logging.getLogger('nova.compute.monitor')
def update_rrd(instance, name, data):
"""
Updates the specified RRD file.
@ -255,20 +256,20 @@ class Instance(object):
Updates the instance's statistics and stores the resulting graphs
in the internal object store on the cloud controller.
"""
logging.debug(_('updating %s...'), self.instance_id)
LOG.debug(_('updating %s...'), self.instance_id)
try:
data = self.fetch_cpu_stats()
if data != None:
logging.debug('CPU: %s', data)
LOG.debug('CPU: %s', data)
update_rrd(self, 'cpu', data)
data = self.fetch_net_stats()
logging.debug('NET: %s', data)
LOG.debug('NET: %s', data)
update_rrd(self, 'net', data)
data = self.fetch_disk_stats()
logging.debug('DISK: %s', data)
LOG.debug('DISK: %s', data)
update_rrd(self, 'disk', data)
# TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
@ -285,7 +286,7 @@ class Instance(object):
graph_disk(self, '1w')
graph_disk(self, '1m')
except Exception:
logging.exception(_('unexpected error during update'))
LOG.exception(_('unexpected error during update'))
self.last_updated = utcnow()
@ -309,7 +310,7 @@ class Instance(object):
self.cputime = float(info['cpu_time'])
self.cputime_last_updated = utcnow()
logging.debug('CPU: %d', self.cputime)
LOG.debug('CPU: %d', self.cputime)
# Skip calculation on first pass. Need delta to get a meaningful value.
if cputime_last_updated == None:
@ -319,17 +320,17 @@ class Instance(object):
d = self.cputime_last_updated - cputime_last_updated
t = d.days * 86400 + d.seconds
logging.debug('t = %d', t)
LOG.debug('t = %d', t)
# Calculate change over time in number of nanoseconds of CPU time used.
cputime_delta = self.cputime - cputime_last
logging.debug('cputime_delta = %s', cputime_delta)
LOG.debug('cputime_delta = %s', cputime_delta)
# Get the number of virtual cpus in this domain.
vcpus = int(info['num_cpu'])
logging.debug('vcpus = %d', vcpus)
LOG.debug('vcpus = %d', vcpus)
# Calculate CPU % used and cap at 100.
return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100)
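The returned figure is a delta of cumulative CPU nanoseconds, normalised by the elapsed wall-clock time and the number of virtual CPUs, then capped at 100. A quick worked example with made-up sample values:

# Hypothetical sample: 2 vCPUs polled 60 seconds apart.
t = 60                      # seconds between the two samples
vcpus = 2
cputime_last = 1.0e11       # cumulative CPU nanoseconds at the previous poll
cputime_now = 1.3e11        # cumulative CPU nanoseconds now

cputime_delta = cputime_now - cputime_last              # 3.0e10 ns consumed
percent = min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100)
print(percent)              # 25.0 -- a quarter of the two vCPUs' capacity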
@ -351,8 +352,8 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
logging.error(_('Cannot get blockstats for "%s" on "%s"'),
disk, self.instance_id)
LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
disk, self.instance_id)
raise
return '%d:%d' % (rd, wr)
@ -373,8 +374,8 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
logging.error(_('Cannot get ifstats for "%s" on "%s"'),
interface, self.instance_id)
LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
interface, self.instance_id)
raise
return '%d:%d' % (rx, tx)
@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service):
try:
conn = virt_connection.get_connection(read_only=True)
except Exception, exn:
logging.exception(_('unexpected exception getting connection'))
LOG.exception(_('unexpected exception getting connection'))
time.sleep(FLAGS.monitoring_instances_delay)
return
@ -416,14 +417,14 @@ class InstanceMonitor(object, service.Service):
try:
self.updateInstances_(conn, domain_ids)
except Exception, exn:
logging.exception('updateInstances_')
LOG.exception('updateInstances_')
def updateInstances_(self, conn, domain_ids):
for domain_id in domain_ids:
if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
logging.debug(_('Found instance: %s'), domain_id)
LOG.debug(_('Found instance: %s'), domain_id)
for key in self._instances.keys():
instance = self._instances[key]

nova/console/__init__.py Normal file
@ -0,0 +1,13 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
"""
:mod:`nova.console` -- Console Proxy to set up VM console access (i.e. with xvp)
=====================================================
.. automodule:: nova.console
:platform: Unix
:synopsis: Wrapper around console proxies such as xvp to set up
multitenant VM console access
.. moduleauthor:: Monsyne Dragon <mdragon@rackspace.com>
"""
from nova.console.api import API

nova/console/api.py Normal file
@ -0,0 +1,75 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles ConsoleProxy API requests
"""
from nova import exception
from nova.db import base
from nova import flags
from nova import rpc
FLAGS = flags.FLAGS
class API(base.Base):
"""API for spining up or down console proxy connections"""
def __init__(self, **kwargs):
super(API, self).__init__(**kwargs)
def get_consoles(self, context, instance_id):
return self.db.console_get_all_by_instance(context, instance_id)
def get_console(self, context, instance_id, console_id):
return self.db.console_get(context, console_id, instance_id)
def delete_console(self, context, instance_id, console_id):
console = self.db.console_get(context,
console_id,
instance_id)
pool = console['pool']
rpc.cast(context,
self.db.queue_get_for(context,
FLAGS.console_topic,
pool['host']),
{"method": "remove_console",
"args": {"console_id": console['id']}})
def create_console(self, context, instance_id):
instance = self.db.instance_get(context, instance_id)
#NOTE(mdragon): If we wanted to return the console info
# here, we would need to do a call.
# They can just do an index later to fetch
# console info. I am not sure which is better
# here.
rpc.cast(context,
self._get_console_topic(context, instance['host']),
{"method": "add_console",
"args": {"instance_id": instance_id}})
def _get_console_topic(self, context, instance_host):
topic = self.db.queue_get_for(context,
FLAGS.compute_topic,
instance_host)
return rpc.call(context,
topic,
{"method": "get_console_topic", "args": {'fake': 1}})

nova/console/fake.py Normal file
@ -0,0 +1,58 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake ConsoleProxy driver for tests.
"""
from nova import exception
class FakeConsoleProxy(object):
"""Fake ConsoleProxy driver."""
@property
def console_type(self):
return "fake"
def setup_console(self, context, console):
"""Sets up actual proxies"""
pass
def teardown_console(self, context, console):
"""Tears down actual proxies"""
pass
def init_host(self):
"""Start up any config'ed consoles on start"""
pass
def generate_password(self, length=8):
"""Returns random console password"""
return "fakepass"
def get_port(self, context):
"""get available port for consoles that need one"""
return 5999
def fix_pool_password(self, password):
"""Trim password to length, and any other massaging"""
return password
def fix_console_password(self, password):
"""Trim password to length, and any other massaging"""
return password

nova/console/manager.py Normal file
@ -0,0 +1,127 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Console Proxy Service
"""
import functools
import logging
import socket
from nova import exception
from nova import flags
from nova import manager
from nova import rpc
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('console_driver',
'nova.console.xvp.XVPConsoleProxy',
'Driver to use for the console proxy')
flags.DEFINE_boolean('stub_compute', False,
'Stub calls to compute worker for tests')
flags.DEFINE_string('console_public_hostname',
socket.gethostname(),
'Publicly visible name for this console host')
class ConsoleProxyManager(manager.Manager):
""" Sets up and tears down any proxy connections needed for accessing
instance consoles securely"""
def __init__(self, console_driver=None, *args, **kwargs):
if not console_driver:
console_driver = FLAGS.console_driver
self.driver = utils.import_object(console_driver)
super(ConsoleProxyManager, self).__init__(*args, **kwargs)
self.driver.host = self.host
def init_host(self):
self.driver.init_host()
@exception.wrap_exception
def add_console(self, context, instance_id, password=None,
port=None, **kwargs):
instance = self.db.instance_get(context, instance_id)
host = instance['host']
name = instance['name']
pool = self.get_pool_for_instance_host(context, host)
try:
console = self.db.console_get_by_pool_instance(context,
pool['id'],
instance_id)
except exception.NotFound:
logging.debug("Adding console")
if not password:
password = self.driver.generate_password()
if not port:
port = self.driver.get_port(context)
console_data = {'instance_name': name,
'instance_id': instance_id,
'password': password,
'pool_id': pool['id']}
if port:
console_data['port'] = port
console = self.db.console_create(context, console_data)
self.driver.setup_console(context, console)
return console['id']
@exception.wrap_exception
def remove_console(self, context, console_id, **_kwargs):
try:
console = self.db.console_get(context, console_id)
except exception.NotFound:
logging.debug(_('Tried to remove non-existent console '
'%(console_id)s.') %
{'console_id': console_id})
return
self.db.console_delete(context, console_id)
self.driver.teardown_console(context, console)
def get_pool_for_instance_host(self, context, instance_host):
context = context.elevated()
console_type = self.driver.console_type
try:
pool = self.db.console_pool_get_by_host_type(context,
instance_host,
self.host,
console_type)
except exception.NotFound:
#NOTE(mdragon): Right now, the only place this info exists is the
# compute worker's flagfile, at least for
# xenserver. Thus we need to ask.
if FLAGS.stub_compute:
pool_info = {'address': '127.0.0.1',
'username': 'test',
'password': '1234pass'}
else:
pool_info = rpc.call(context,
self.db.queue_get_for(context,
FLAGS.compute_topic,
instance_host),
{"method": "get_console_pool_info",
"args": {"console_type": console_type}})
pool_info['password'] = self.driver.fix_pool_password(
pool_info['password'])
pool_info['host'] = self.host
pool_info['public_hostname'] = FLAGS.console_public_hostname
pool_info['console_type'] = self.driver.console_type
pool_info['compute_host'] = instance_host
pool = self.db.console_pool_create(context, pool_info)
return pool
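get_pool_for_instance_host is a get-or-create: the pool row keyed by (instance host, console host, console type) is reused when it exists, and the relatively expensive rpc.call to the compute worker for credentials only happens on the first miss. A toy version of that caching shape, with in-memory stand-ins instead of the database:

class NotFound(Exception):
    pass


class PoolRegistry(object):
    """Toy stand-in for the console pool table used above."""
    def __init__(self):
        self.pools = {}

    def get(self, key):
        try:
            return self.pools[key]
        except KeyError:
            raise NotFound(key)

    def create(self, key, info):
        self.pools[key] = info
        return info


def pool_for(registry, instance_host, console_host, console_type, ask_compute):
    key = (instance_host, console_host, console_type)
    try:
        return registry.get(key)
    except NotFound:
        info = ask_compute(instance_host)   # the expensive rpc.call, done once
        return registry.create(key, info)


registry = PoolRegistry()
calls = []


def ask(host):
    calls.append(host)
    return {'address': host, 'username': 'root'}


pool_for(registry, 'node1', 'proxy1', 'vnc+xvp', ask)
pool_for(registry, 'node1', 'proxy1', 'vnc+xvp', ask)
print(len(calls))   # 1 -- the compute worker is only asked on the first miss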

@ -0,0 +1,16 @@
# One time password use with time window
OTP ALLOW IPCHECK HTTP 60
#if $multiplex_port
MULTIPLEX $multiplex_port
#end if
#for $pool in $pools
POOL $pool.address
DOMAIN $pool.address
MANAGER root $pool.password
HOST $pool.address
VM - dummy 0123456789ABCDEF
#for $console in $pool.consoles
VM #if $multiplex_port then '-' else $console.port # $console.instance_name $pass_encode($console.password)
#end for
#end for
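For orientation, a template like the one above would render to roughly the following for a single pool with one console and the multiplex port set (the addresses, instance name and obfuscated passwords below are invented):

# One time password use with time window
OTP ALLOW IPCHECK HTTP 60
MULTIPLEX 5900
POOL 192.168.1.10
DOMAIN 192.168.1.10
MANAGER root s3cr3tObfuscated
HOST 192.168.1.10
VM - dummy 0123456789ABCDEF
VM - instance-00000001 a1b2c3d4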

nova/console/xvp.py Normal file
@ -0,0 +1,194 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
XVP (Xenserver VNC Proxy) driver.
"""
import fcntl
import logging
import os
import signal
import subprocess
from Cheetah.Template import Template
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import utils
flags.DEFINE_string('console_xvp_conf_template',
utils.abspath('console/xvp.conf.template'),
'XVP conf template')
flags.DEFINE_string('console_xvp_conf',
'/etc/xvp.conf',
'generated XVP conf file')
flags.DEFINE_string('console_xvp_pid',
'/var/run/xvp.pid',
'XVP master process pid file')
flags.DEFINE_string('console_xvp_log',
'/var/log/xvp.log',
'XVP log file')
flags.DEFINE_integer('console_xvp_multiplex_port',
5900,
"port for XVP to multiplex VNC connections on")
FLAGS = flags.FLAGS
class XVPConsoleProxy(object):
"""Sets up XVP config, and manages xvp daemon"""
def __init__(self):
self.xvpconf_template = open(FLAGS.console_xvp_conf_template).read()
self.host = FLAGS.host # default, set by manager.
super(XVPConsoleProxy, self).__init__()
@property
def console_type(self):
return "vnc+xvp"
def get_port(self, context):
"""get available port for consoles that need one"""
#TODO(mdragon): implement port selection for non multiplex ports,
# we are not using that, but someone else may want
# it.
return FLAGS.console_xvp_multiplex_port
def setup_console(self, context, console):
"""Sets up actual proxies"""
self._rebuild_xvp_conf(context.elevated())
def teardown_console(self, context, console):
"""Tears down actual proxies"""
self._rebuild_xvp_conf(context.elevated())
def init_host(self):
"""Start up any config'ed consoles on start"""
ctxt = context.get_admin_context()
self._rebuild_xvp_conf(ctxt)
def fix_pool_password(self, password):
"""Trim password to length, and encode"""
return self._xvp_encrypt(password, is_pool_password=True)
def fix_console_password(self, password):
"""Trim password to length, and encode"""
return self._xvp_encrypt(password)
def generate_password(self, length=8):
"""Returns random console password"""
return os.urandom(length * 2).encode('base64')[:length]
def _rebuild_xvp_conf(self, context):
logging.debug("Rebuilding xvp conf")
pools = [pool for pool in
db.console_pool_get_all_by_host_type(context, self.host,
self.console_type)
if pool['consoles']]
if not pools:
logging.debug("No console pools!")
self._xvp_stop()
return
conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port,
'pools': pools,
'pass_encode': self.fix_console_password}
config = str(Template(self.xvpconf_template, searchList=[conf_data]))
self._write_conf(config)
self._xvp_restart()
def _write_conf(self, config):
logging.debug('Re-wrote %s' % FLAGS.console_xvp_conf)
with open(FLAGS.console_xvp_conf, 'w') as cfile:
cfile.write(config)
def _xvp_stop(self):
logging.debug("Stopping xvp")
pid = self._xvp_pid()
if not pid:
return
try:
os.kill(pid, signal.SIGTERM)
except OSError:
#if it's already not running, no problem.
pass
def _xvp_start(self):
if self._xvp_check_running():
return
logging.debug("Starting xvp")
try:
utils.execute('xvp -p %s -c %s -l %s' %
(FLAGS.console_xvp_pid,
FLAGS.console_xvp_conf,
FLAGS.console_xvp_log))
except exception.ProcessExecutionError, err:
logging.error("Error starting xvp: %s" % err)
def _xvp_restart(self):
logging.debug("Restarting xvp")
if not self._xvp_check_running():
logging.debug("xvp not running...")
self._xvp_start()
else:
pid = self._xvp_pid()
os.kill(pid, signal.SIGUSR1)
def _xvp_pid(self):
try:
with open(FLAGS.console_xvp_pid, 'r') as pidfile:
pid = int(pidfile.read())
except IOError:
return None
except ValueError:
return None
return pid
def _xvp_check_running(self):
pid = self._xvp_pid()
if not pid:
return False
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _xvp_encrypt(self, password, is_pool_password=False):
"""Call xvp to obfuscate passwords for config file.
Args:
- password: the password to encode, max 8 char for vm passwords,
and 16 chars for pool passwords. passwords will
be trimmed to max len before encoding.
- is_pool_password: True if this is the XenServer api password
False if it's a VM console password
(xvp uses different keys and max lengths for pool passwords)
Note that xvp's obfuscation should not be considered 'real' encryption.
It simply DES encrypts the passwords with static keys plainly viewable
in the xvp source code."""
maxlen = 8
flag = '-e'
if is_pool_password:
maxlen = 16
flag = '-x'
#xvp will blow up on passwords that are too long (mdragon)
password = password[:maxlen]
out, err = utils.execute('xvp %s' % flag, process_input=password)
return out.strip()
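_xvp_check_running leans on the classic pid-file idiom: signal 0 delivers nothing, but os.kill still raises OSError when the pid does not exist, so a stale pid file is detected without disturbing a live daemon. A minimal standalone illustration:

import os


def pid_is_running(pid):
    """True if a process with this pid exists and we may signal it
    (signal 0 sends nothing, it only performs the check)."""
    if not pid:
        return False
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    return True


print(pid_is_running(os.getpid()))   # True -- our own pid certainly exists
print(pid_is_running(2 ** 22 + 7))   # almost certainly False on a normal system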

@ -24,7 +24,6 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
import base64
import gettext
import hashlib
import logging
import os
import shutil
import struct
@ -39,8 +38,10 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import flags
from nova import log as logging
LOG = logging.getLogger("nova.crypto")
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA'))
flags.DEFINE_string('key_file',
@ -254,7 +255,7 @@ def _sign_csr(csr_text, ca_folder):
csrfile = open(inbound, "w")
csrfile.write(csr_text)
csrfile.close()
logging.debug(_("Flags path: %s") % ca_folder)
LOG.debug(_("Flags path: %s"), ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)

Some files were not shown because too many files have changed in this diff.