Merge trunk and re-run build_i18n

jaypipes@gmail.com 2011-02-21 10:04:32 -05:00
commit 6facb35c26
54 changed files with 1757 additions and 448 deletions


@ -1,36 +1,43 @@
 # Format is:
-# <preferred e-mail> <other e-mail>
-<code@term.ie> <github@anarkystic.com>
-<code@term.ie> <termie@preciousroy.local>
-<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
-<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
-<matt.dietz@rackspace.com> <mdietz@openstack>
-<cbehrens@codestud.com> <chris.behrens@rackspace.com>
-<devin.carlen@gmail.com> <devcamcar@illian.local>
-<ewan.mellor@citrix.com> <emellor@silver>
-<jaypipes@gmail.com> <jpipes@serialcoder>
+# <preferred e-mail> <other e-mail 1>
+# <preferred e-mail> <other e-mail 2>
 <anotherjesse@gmail.com> <jesse@dancelamb>
 <anotherjesse@gmail.com> <jesse@gigantor.local>
 <anotherjesse@gmail.com> <jesse@ubuntu>
-<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
+<ant@openstack.org> <amesserl@rackspace.com>
+<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
+<brian.lamar@rackspace.com> <brian.lamar@gmail.com>
+<bschott@isi.edu> <bfschott@gmail.com>
+<cbehrens@codestud.com> <chris.behrens@rackspace.com>
+<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
+<code@term.ie> <github@anarkystic.com>
+<code@term.ie> <termie@preciousroy.local>
+<corywright@gmail.com> <cory.wright@rackspace.com>
+<devin.carlen@gmail.com> <devcamcar@illian.local>
+<ewan.mellor@citrix.com> <emellor@silver>
+<jaypipes@gmail.com> <jpipes@serialcoder>
 <jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
+<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
 <jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
 <justin@fathomdb.com> <justinsb@justinsb-desktop>
-<masumotok@nttdata.co.jp> <root@openstack2-api>
+<justin@fathomdb.com> <superstack@superstack.org>
 <masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
+<masumotok@nttdata.co.jp> <root@openstack2-api>
+<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
+<matt.dietz@rackspace.com> <mdietz@openstack>
 <mordred@inaugust.com> <mordred@hudson>
-<paul@openstack.org> <pvoccio@castor.local>
 <paul@openstack.org> <paul.voccio@rackspace.com>
+<paul@openstack.org> <pvoccio@castor.local>
+<rconradharris@gmail.com> <rick.harris@rackspace.com>
+<rlane@wikimedia.org> <laner@controller>
+<sleepsonthefloor@gmail.com> <root@tonbuntu>
 <soren.hansen@rackspace.com> <soren@linux2go.dk>
 <todd@ansolabs.com> <todd@lapex>
 <todd@ansolabs.com> <todd@rubidine.com>
-<vishvananda@gmail.com> <vishvananda@yahoo.com>
+<tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
+<ueno.nachi@lab.ntt.co.jp> <nati.ueno@gmail.com>
+<ueno.nachi@lab.ntt.co.jp> <nova@u4>
+<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
 <vishvananda@gmail.com> <root@mirror.nasanebula.net>
 <vishvananda@gmail.com> <root@ubuntu>
-<sleepsonthefloor@gmail.com> <root@tonbuntu>
-<rlane@wikimedia.org> <laner@controller>
-<rconradharris@gmail.com> <rick.harris@rackspace.com>
-<corywright@gmail.com> <cory.wright@rackspace.com>
-<ant@openstack.org> <amesserl@rackspace.com>
-<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
-<justin@fathomdb.com> <superstack@superstack.org>
+<vishvananda@gmail.com> <vishvananda@yahoo.com>

Authors

@ -4,13 +4,16 @@ Anthony Young <sleepsonthefloor@gmail.com>
 Antony Messerli <ant@openstack.org>
 Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
 Bilal Akhtar <bilalakhtar@ubuntu.com>
+Brian Lamar <brian.lamar@rackspace.com>
+Brian Schott <bschott@isi.edu>
+Brian Waldon <brian.waldon@rackspace.com>
 Chiradeep Vittal <chiradeep@cloud.com>
 Chmouel Boudjnah <chmouel@chmouel.com>
 Chris Behrens <cbehrens@codestud.com>
 Christian Berendt <berendt@b1-systems.de>
 Cory Wright <corywright@gmail.com>
-David Pravec <David.Pravec@danix.org>
 Dan Prince <dan.prince@rackspace.com>
+David Pravec <David.Pravec@danix.org>
 Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
 Ed Leafe <ed@leafe.com>
@ -41,7 +44,8 @@ Monsyne Dragon <mdragon@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 MORITA Kazutaka <morita.kazutaka@gmail.com>
 Muneyuki Noguchi <noguchimn@nttdata.co.jp>
-Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail.com> <nova@u4>
+Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
+Naveed Massjouni <naveed.massjouni@rackspace.com>
 Paul Voccio <paul@openstack.org>
 Ricardo Carrillo Cruz <emaildericky@gmail.com>
 Rick Clark <rick@openstack.org>
@ -55,7 +59,8 @@ Soren Hansen <soren.hansen@rackspace.com>
 Thierry Carrez <thierry@openstack.org>
 Todd Willey <todd@ansolabs.com>
 Trey Morris <trey.morris@rackspace.com>
-Tushar Patil <tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
+Tushar Patil <tushar.vitthal.patil@gmail.com>
+Vasiliy Shlykov <vash@vasiliyshlykov.org>
 Vishvananda Ishaya <vishvananda@gmail.com>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
 Zhixue Wu <Zhixue.Wu@citrix.com>

HACKING

@ -47,3 +47,22 @@ Human Alphabetical Order Examples
   from nova.auth import users
   from nova.endpoint import api
   from nova.endpoint import cloud
+
+Docstrings
+----------
+  """Summary of the function, class or method, less than 80 characters.
+
+  New paragraph after newline that explains in more detail any general
+  information about the function, class or method. After this, if defining
+  parameters and return types use the Sphinx format. After that an extra
+  newline then close the quotations.
+
+  When writing the docstring for a class, an extra line should be placed
+  after the closing quotations. For more in-depth explanations for these
+  decisions see http://www.python.org/dev/peps/pep-0257/
+
+  :param foo: the foo parameter
+  :param bar: the bar parameter
+  :returns: description of the return value
+
+  """


@ -6,14 +6,23 @@ graft doc
 graft smoketests
 graft tools
 graft etc
+graft bzrplugins
+graft contrib
+graft po
+graft plugins
 include nova/api/openstack/notes.txt
+include nova/auth/*.schema
 include nova/auth/novarc.template
+include nova/auth/opendj.sh
 include nova/auth/slap.sh
 include nova/cloudpipe/bootscript.sh
 include nova/cloudpipe/client.ovpn.template
+include nova/cloudpipe/bootscript.template
 include nova/compute/fakevirtinstance.xml
 include nova/compute/interfaces.template
+include nova/console/xvp.conf.template
 include nova/db/sqlalchemy/migrate_repo/migrate.cfg
+include nova/db/sqlalchemy/migrate_repo/README
 include nova/virt/interfaces.template
 include nova/virt/libvirt*.xml.template
 include nova/tests/CA/
@ -25,6 +34,7 @@ include nova/tests/bundle/1mb.manifest.xml
 include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
 include nova/tests/bundle/1mb.part.0
 include nova/tests/bundle/1mb.part.1
+include nova/tests/db/nova.austin.sqlite
 include plugins/xenapi/README
 include plugins/xenapi/etc/xapi.d/plugins/objectstore
 include plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py


@ -433,6 +433,37 @@ class ProjectCommands(object):
"nova-api server on this host.") "nova-api server on this host.")
class FixedIpCommands(object):
"""Class for managing fixed ip."""
def list(self, host=None):
"""Lists all fixed ips (optionally by host) arguments: [host]"""
ctxt = context.get_admin_context()
if host == None:
fixed_ips = db.fixed_ip_get_all(ctxt)
else:
fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host)
print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (_('network'),
_('IP address'),
_('MAC address'),
_('hostname'),
_('host'))
for fixed_ip in fixed_ips:
hostname = None
host = None
mac_address = None
if fixed_ip['instance']:
instance = fixed_ip['instance']
hostname = instance['hostname']
host = instance['host']
mac_address = instance['mac_address']
print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (
fixed_ip['network']['cidr'],
fixed_ip['address'],
mac_address, hostname, host)
class FloatingIpCommands(object): class FloatingIpCommands(object):
"""Class for managing floating ip.""" """Class for managing floating ip."""
@ -472,8 +503,8 @@ class NetworkCommands(object):
"""Class for managing networks.""" """Class for managing networks."""
def create(self, fixed_range=None, num_networks=None, def create(self, fixed_range=None, num_networks=None,
network_size=None, vlan_start=None, vpn_start=None, network_size=None, vlan_start=None,
fixed_range_v6=None): vpn_start=None, fixed_range_v6=None, label='public'):
"""Creates fixed ips for host by range """Creates fixed ips for host by range
arguments: [fixed_range=FLAG], [num_networks=FLAG], arguments: [fixed_range=FLAG], [num_networks=FLAG],
[network_size=FLAG], [vlan_start=FLAG], [network_size=FLAG], [vlan_start=FLAG],
@ -495,9 +526,22 @@ class NetworkCommands(object):
                                   cidr=fixed_range,
                                   num_networks=int(num_networks),
                                   network_size=int(network_size),
-                                  cidr_v6=fixed_range_v6,
                                   vlan_start=int(vlan_start),
-                                  vpn_start=int(vpn_start))
+                                  vpn_start=int(vpn_start),
+                                  cidr_v6=fixed_range_v6,
+                                  label=label)
+
+    def list(self):
+        """List all created networks"""
+        print "%-18s\t%-15s\t%-15s\t%-15s" % (_('network'),
+                                              _('netmask'),
+                                              _('start address'),
+                                              'DNS')
+        for network in db.network_get_all(context.get_admin_context()):
+            print "%-18s\t%-15s\t%-15s\t%-15s" % (network.cidr,
+                                                  network.netmask,
+                                                  network.dhcp_start,
+                                                  network.dns)
class ServiceCommands(object): class ServiceCommands(object):
@ -579,6 +623,13 @@ class VolumeCommands(object):
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
volume = db.volume_get(ctxt, param2id(volume_id)) volume = db.volume_get(ctxt, param2id(volume_id))
host = volume['host'] host = volume['host']
if not host:
print "Volume not yet assigned to host."
print "Deleting volume from database and skipping rpc."
db.volume_destroy(ctxt, param2id(volume_id))
return
if volume['status'] == 'in-use': if volume['status'] == 'in-use':
print "Volume is in-use." print "Volume is in-use."
print "Detach volume from instance and then try again." print "Detach volume from instance and then try again."
@ -615,6 +666,7 @@ CATEGORIES = [
('role', RoleCommands), ('role', RoleCommands),
('shell', ShellCommands), ('shell', ShellCommands),
('vpn', VpnCommands), ('vpn', VpnCommands),
('fixed', FixedIpCommands),
('floating', FloatingIpCommands), ('floating', FloatingIpCommands),
('network', NetworkCommands), ('network', NetworkCommands),
('service', ServiceCommands), ('service', ServiceCommands),
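
The new ('fixed', FixedIpCommands) entry plugs into nova-manage's two-level dispatch: the first CLI argument picks a class out of CATEGORIES and the second names a method on it. A simplified, self-contained sketch of that lookup (the real bin/nova-manage also parses flags and prints usage, and FixedIpCommands is reduced here to a stub):

    class FixedIpCommands(object):
        def list(self, host=None):
            print 'would list fixed ips for host %s' % host

    CATEGORIES = [('fixed', FixedIpCommands)]

    def dispatch(argv):
        """Map e.g. ['fixed', 'list', 'host1'] to FixedIpCommands().list('host1')."""
        category, action, args = argv[0], argv[1], argv[2:]
        matches = [cls for name, cls in CATEGORIES if name == category]
        if not matches:
            raise SystemExit('Unknown category: %s' % category)
        getattr(matches[0](), action)(*args)

    dispatch(['fixed', 'list', 'host1'])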


@ -20,7 +20,6 @@ Starting point for routing EC2 requests.
""" """
import datetime
import webob import webob
import webob.dec import webob.dec
import webob.exc import webob.exc
@ -56,23 +55,20 @@ class RequestLogging(wsgi.Middleware):
     @webob.dec.wsgify
     def __call__(self, req):
+        start = utils.utcnow()
         rv = req.get_response(self.application)
-        self.log_request_completion(rv, req)
+        self.log_request_completion(rv, req, start)
         return rv

-    def log_request_completion(self, response, request):
+    def log_request_completion(self, response, request, start):
         controller = request.environ.get('ec2.controller', None)
         if controller:
             controller = controller.__class__.__name__
         action = request.environ.get('ec2.action', None)
         ctxt = request.environ.get('ec2.context', None)
-        seconds = 'X'
-        microseconds = 'X'
-        if ctxt:
-            delta = datetime.datetime.utcnow() - \
-                    ctxt.timestamp
-            seconds = delta.seconds
-            microseconds = delta.microseconds
+        delta = utils.utcnow() - start
+        seconds = delta.seconds
+        microseconds = delta.microseconds
         LOG.info(
             "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
             seconds,
@ -294,7 +290,7 @@ class Authorizer(wsgi.Middleware):
             return True
         if 'none' in roles:
             return False
-        return any(context.project.has_role(context.user.id, role)
+        return any(context.project.has_role(context.user_id, role)
                    for role in roles)
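
The logging change above times each request from a start timestamp captured in __call__ instead of the context's creation time; the elapsed-time arithmetic it relies on is plain datetime subtraction, for example:

    import datetime
    import time

    start = datetime.datetime.utcnow()      # stand-in for utils.utcnow()
    time.sleep(0.25)                        # pretend the request is being handled
    delta = datetime.datetime.utcnow() - start
    print '%s.%ss' % (delta.seconds, delta.microseconds)
    # prints something like '0.250123s'; microseconds are not zero-padded,
    # matching the '%s.%ss' format passed to LOG.info above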


@ -20,6 +20,7 @@
APIRequest class APIRequest class
""" """
import datetime
import re import re
# TODO(termie): replace minidom with etree # TODO(termie): replace minidom with etree
from xml.dom import minidom from xml.dom import minidom
@ -171,6 +172,8 @@ class APIRequest(object):
self._render_dict(xml, data_el, data.__dict__) self._render_dict(xml, data_el, data.__dict__)
elif isinstance(data, bool): elif isinstance(data, bool):
data_el.appendChild(xml.createTextNode(str(data).lower())) data_el.appendChild(xml.createTextNode(str(data).lower()))
elif isinstance(data, datetime.datetime):
data_el.appendChild(xml.createTextNode(data.isoformat()))
elif data != None: elif data != None:
data_el.appendChild(xml.createTextNode(str(data))) data_el.appendChild(xml.createTextNode(str(data)))


@ -282,7 +282,7 @@ class CloudController(object):
'description': 'fixme'}]} 'description': 'fixme'}]}
def describe_key_pairs(self, context, key_name=None, **kwargs): def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = db.key_pair_get_all_by_user(context, context.user.id) key_pairs = db.key_pair_get_all_by_user(context, context.user_id)
if not key_name is None: if not key_name is None:
key_pairs = [x for x in key_pairs if x['name'] in key_name] key_pairs = [x for x in key_pairs if x['name'] in key_name]
@ -290,7 +290,7 @@ class CloudController(object):
for key_pair in key_pairs: for key_pair in key_pairs:
# filter out the vpn keys # filter out the vpn keys
suffix = FLAGS.vpn_key_suffix suffix = FLAGS.vpn_key_suffix
if context.user.is_admin() or \ if context.is_admin or \
not key_pair['name'].endswith(suffix): not key_pair['name'].endswith(suffix):
result.append({ result.append({
'keyName': key_pair['name'], 'keyName': key_pair['name'],
@ -301,7 +301,7 @@ class CloudController(object):
def create_key_pair(self, context, key_name, **kwargs): def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context) LOG.audit(_("Create key pair %s"), key_name, context=context)
data = _gen_key(context, context.user.id, key_name) data = _gen_key(context, context.user_id, key_name)
return {'keyName': key_name, return {'keyName': key_name,
'keyFingerprint': data['fingerprint'], 'keyFingerprint': data['fingerprint'],
'keyMaterial': data['private_key']} 'keyMaterial': data['private_key']}
@ -310,7 +310,7 @@ class CloudController(object):
def delete_key_pair(self, context, key_name, **kwargs): def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context) LOG.audit(_("Delete key pair %s"), key_name, context=context)
try: try:
db.key_pair_destroy(context, context.user.id, key_name) db.key_pair_destroy(context, context.user_id, key_name)
except exception.NotFound: except exception.NotFound:
# aws returns true even if the key doesn't exist # aws returns true even if the key doesn't exist
pass pass
@ -318,7 +318,7 @@ class CloudController(object):
def describe_security_groups(self, context, group_name=None, **kwargs): def describe_security_groups(self, context, group_name=None, **kwargs):
self.compute_api.ensure_default_security_group(context) self.compute_api.ensure_default_security_group(context)
if context.user.is_admin(): if context.is_admin:
groups = db.security_group_get_all(context) groups = db.security_group_get_all(context)
else: else:
groups = db.security_group_get_by_project(context, groups = db.security_group_get_by_project(context,
@ -494,7 +494,7 @@ class CloudController(object):
if db.security_group_exists(context, context.project_id, group_name): if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError(_('group %s already exists') % group_name) raise exception.ApiError(_('group %s already exists') % group_name)
group = {'user_id': context.user.id, group = {'user_id': context.user_id,
'project_id': context.project_id, 'project_id': context.project_id,
'name': group_name, 'name': group_name,
'description': group_description} 'description': group_description}
@ -674,7 +674,7 @@ class CloudController(object):
else: else:
instances = self.compute_api.get_all(context, **kwargs) instances = self.compute_api.get_all(context, **kwargs)
for instance in instances: for instance in instances:
if not context.user.is_admin(): if not context.is_admin:
if instance['image_id'] == FLAGS.vpn_image_id: if instance['image_id'] == FLAGS.vpn_image_id:
continue continue
i = {} i = {}
@ -702,7 +702,7 @@ class CloudController(object):
i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
i['keyName'] = instance['key_name'] i['keyName'] = instance['key_name']
if context.user.is_admin(): if context.is_admin:
i['keyName'] = '%s (%s, %s)' % (i['keyName'], i['keyName'] = '%s (%s, %s)' % (i['keyName'],
instance['project_id'], instance['project_id'],
instance['host']) instance['host'])
@ -736,7 +736,7 @@ class CloudController(object):
def format_addresses(self, context): def format_addresses(self, context):
addresses = [] addresses = []
if context.user.is_admin(): if context.is_admin:
iterator = db.floating_ip_get_all(context) iterator = db.floating_ip_get_all(context)
else: else:
iterator = db.floating_ip_get_all_by_project(context, iterator = db.floating_ip_get_all_by_project(context,
@ -750,7 +750,7 @@ class CloudController(object):
ec2_id = id_to_ec2_id(instance_id) ec2_id = id_to_ec2_id(instance_id)
address_rv = {'public_ip': address, address_rv = {'public_ip': address,
'instance_id': ec2_id} 'instance_id': ec2_id}
if context.user.is_admin(): if context.is_admin:
details = "%s (%s)" % (address_rv['instance_id'], details = "%s (%s)" % (address_rv['instance_id'],
floating_ip_ref['project_id']) floating_ip_ref['project_id'])
address_rv['instance_id'] = details address_rv['instance_id'] = details


@ -34,6 +34,7 @@ from nova.api.openstack import flavors
from nova.api.openstack import images from nova.api.openstack import images
from nova.api.openstack import servers from nova.api.openstack import servers
from nova.api.openstack import shared_ip_groups from nova.api.openstack import shared_ip_groups
from nova.api.openstack import zones
LOG = logging.getLogger('nova.api.openstack') LOG = logging.getLogger('nova.api.openstack')
@ -79,6 +80,10 @@ class APIRouter(wsgi.Router):
server_members["actions"] = "GET" server_members["actions"] = "GET"
server_members['suspend'] = 'POST' server_members['suspend'] = 'POST'
server_members['resume'] = 'POST' server_members['resume'] = 'POST'
server_members['reset_network'] = 'POST'
mapper.resource("zone", "zones", controller=zones.Controller(),
collection={'detail': 'GET'})
mapper.resource("server", "servers", controller=servers.Controller(), mapper.resource("server", "servers", controller=servers.Controller(),
collection={'detail': 'GET'}, collection={'detail': 'GET'},
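
For reference, mapper.resource("zone", "zones", ...) with a 'detail' collection action wires up the usual RESTful routes; roughly the following mapping (paths are the Routes library defaults, listed as an illustration rather than taken from this hunk):

    # GET    /zones         -> zones.Controller().index
    # GET    /zones/detail  -> zones.Controller().detail
    # POST   /zones         -> zones.Controller().create
    # GET    /zones/{id}    -> zones.Controller().show
    # PUT    /zones/{id}    -> zones.Controller().update
    # DELETE /zones/{id}    -> zones.Controller().delete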


@ -19,6 +19,7 @@ import datetime
import hashlib import hashlib
import json import json
import time import time
import logging
import webob.exc import webob.exc
import webob.dec import webob.dec


@ -18,22 +18,29 @@
 from nova import exception


-def limited(items, req):
-    """Return a slice of items according to requested offset and limit.
-
-    items - a sliceable
-    req - wobob.Request possibly containing offset and limit GET variables.
-    offset is where to start in the list, and limit is the maximum number
-    of items to return.
-
-    If limit is not specified, 0, or > 1000, defaults to 1000.
-    """
-    offset = int(req.GET.get('offset', 0))
-    limit = int(req.GET.get('limit', 0))
-    if not limit:
-        limit = 1000
-    limit = min(1000, limit)
+def limited(items, request, max_limit=1000):
+    """
+    Return a slice of items according to requested offset and limit.
+
+    @param items: A sliceable entity
+    @param request: `webob.Request` possibly containing 'offset' and 'limit'
+                    GET variables. 'offset' is where to start in the list,
+                    and 'limit' is the maximum number of items to return. If
+                    'limit' is not specified, 0, or > max_limit, we default
+                    to max_limit.
+    @kwarg max_limit: The maximum number of items to return from 'items'
+    """
+    try:
+        offset = int(request.GET.get('offset', 0))
+    except ValueError:
+        offset = 0
+
+    try:
+        limit = int(request.GET.get('limit', max_limit))
+    except ValueError:
+        limit = max_limit
+
+    limit = min(max_limit, limit or max_limit)
     range_end = offset + limit
     return items[offset:range_end]
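
A quick illustration of the clamping behaviour of limited(), with the function above in scope and a bare object standing in for a real webob.Request:

    class FakeRequest(object):
        """Stand-in exposing only the GET mapping that limited() reads."""
        def __init__(self, **params):
            self.GET = params

    items = range(2500)
    print len(limited(items, FakeRequest()))                          # 1000 (default cap)
    print len(limited(items, FakeRequest(offset='100', limit='50')))  # 50
    print len(limited(items, FakeRequest(limit='9999')))              # 1000 (clamped to max_limit)
    print limited(items, FakeRequest(offset='bogus'))[:3]             # [0, 1, 2] (bad offset treated as 0)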


@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC. # Copyright 2010 OpenStack LLC.
# All Rights Reserved. # All Rights Reserved.
# #
@ -64,6 +62,22 @@ def _translate_detail_keys(inst):
inst_dict['status'] = power_mapping[inst_dict['status']] inst_dict['status'] = power_mapping[inst_dict['status']]
inst_dict['addresses'] = dict(public=[], private=[]) inst_dict['addresses'] = dict(public=[], private=[])
# grab single private fixed ip
try:
private_ip = inst['fixed_ip']['address']
if private_ip:
inst_dict['addresses']['private'].append(private_ip)
except KeyError:
LOG.debug(_("Failed to read private ip"))
# grab all public floating ips
try:
for floating in inst['fixed_ip']['floating_ips']:
inst_dict['addresses']['public'].append(floating['address'])
except KeyError:
LOG.debug(_("Failed to read public ip(s)"))
inst_dict['metadata'] = {} inst_dict['metadata'] = {}
inst_dict['hostId'] = '' inst_dict['hostId'] = ''
@ -148,8 +162,12 @@ class Controller(wsgi.Controller):
if not env: if not env:
return faults.Fault(exc.HTTPUnprocessableEntity()) return faults.Fault(exc.HTTPUnprocessableEntity())
-        key_pair = auth_manager.AuthManager.get_key_pairs(
-            req.environ['nova.context'])[0]
+        key_pairs = auth_manager.AuthManager.get_key_pairs(
+            req.environ['nova.context'])
+        if not key_pairs:
+            raise exception.NotFound(_("No keypairs defined"))
+        key_pair = key_pairs[0]
image_id = common.get_image_id_from_image_hash(self._image_service, image_id = common.get_image_id_from_image_hash(self._image_service,
req.environ['nova.context'], env['server']['imageId']) req.environ['nova.context'], env['server']['imageId'])
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
@ -163,7 +181,8 @@ class Controller(wsgi.Controller):
display_name=env['server']['name'], display_name=env['server']['name'],
display_description=env['server']['name'], display_description=env['server']['name'],
             key_name=key_pair['name'],
-            key_data=key_pair['public_key'])
+            key_data=key_pair['public_key'],
+            onset_files=env.get('onset_files', []))
return _translate_keys(instances[0]) return _translate_keys(instances[0])
def update(self, req, id): def update(self, req, id):
@ -249,6 +268,20 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity()) return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted() return exc.HTTPAccepted()
def reset_network(self, req, id):
"""
Reset networking on an instance (admin only).
"""
context = req.environ['nova.context']
try:
self.compute_api.reset_network(context, id)
except:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::reset_network %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
def pause(self, req, id): def pause(self, req, id):
""" Permit Admins to Pause the server. """ """ Permit Admins to Pause the server. """
ctxt = req.environ['nova.context'] ctxt = req.environ['nova.context']


@ -0,0 +1,80 @@
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import common
import logging
from nova import flags
from nova import wsgi
from nova import db
FLAGS = flags.FLAGS
def _filter_keys(item, keys):
"""
Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _scrub_zone(zone):
return _filter_keys(zone, ('id', 'api_url'))
class Controller(wsgi.Controller):
_serialization_metadata = {
'application/xml': {
"attributes": {
"zone": ["id", "api_url"]}}}
def index(self, req):
"""Return all zones in brief"""
items = db.zone_get_all(req.environ['nova.context'])
items = common.limited(items, req)
items = [_scrub_zone(item) for item in items]
return dict(zones=items)
def detail(self, req):
"""Return all zones in detail"""
return self.index(req)
def show(self, req, id):
"""Return data about the given zone id"""
zone_id = int(id)
zone = db.zone_get(req.environ['nova.context'], zone_id)
return dict(zone=_scrub_zone(zone))
def delete(self, req, id):
zone_id = int(id)
db.zone_delete(req.environ['nova.context'], zone_id)
return {}
def create(self, req):
context = req.environ['nova.context']
env = self._deserialize(req.body, req)
zone = db.zone_create(context, env["zone"])
return dict(zone=_scrub_zone(zone))
def update(self, req, id):
context = req.environ['nova.context']
env = self._deserialize(req.body, req)
zone_id = int(id)
zone = db.zone_update(context, zone_id, env["zone"])
return dict(zone=_scrub_zone(zone))
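
The scrubbing helpers above simply whitelist dictionary keys, so whatever else a zone record carries, only id and api_url are returned to the caller. A small demonstration with _filter_keys in scope (the input dict is invented for the example):

    zone = {'id': 1,
            'api_url': 'http://zone1.example.com:8774/',
            'username': 'admin',
            'password': 'secret'}
    print sorted(_filter_keys(zone, ('id', 'api_url')).items())
    # [('api_url', 'http://zone1.example.com:8774/'), ('id', 1)]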


@ -74,6 +74,25 @@ LOG = logging.getLogger("nova.ldapdriver")
# in which we may want to change the interface a bit more. # in which we may want to change the interface a bit more.
def _clean(attr):
"""Clean attr for insertion into ldap"""
if attr is None:
return None
if type(attr) is unicode:
return str(attr)
return attr
def sanitize(fn):
"""Decorator to sanitize all args"""
def _wrapped(self, *args, **kwargs):
args = [_clean(x) for x in args]
kwargs = dict((k, _clean(v)) for (k, v) in kwargs)
return fn(self, *args, **kwargs)
_wrapped.func_name = fn.func_name
return _wrapped
class LdapDriver(object): class LdapDriver(object):
"""Ldap Auth driver """Ldap Auth driver
@ -106,23 +125,27 @@ class LdapDriver(object):
self.conn.unbind_s() self.conn.unbind_s()
return False return False
@sanitize
def get_user(self, uid): def get_user(self, uid):
"""Retrieve user by id""" """Retrieve user by id"""
attr = self.__get_ldap_user(uid) attr = self.__get_ldap_user(uid)
return self.__to_user(attr) return self.__to_user(attr)
@sanitize
def get_user_from_access_key(self, access): def get_user_from_access_key(self, access):
"""Retrieve user by access key""" """Retrieve user by access key"""
query = '(accessKey=%s)' % access query = '(accessKey=%s)' % access
dn = FLAGS.ldap_user_subtree dn = FLAGS.ldap_user_subtree
return self.__to_user(self.__find_object(dn, query)) return self.__to_user(self.__find_object(dn, query))
@sanitize
def get_project(self, pid): def get_project(self, pid):
"""Retrieve project by id""" """Retrieve project by id"""
dn = self.__project_to_dn(pid) dn = self.__project_to_dn(pid)
attr = self.__find_object(dn, LdapDriver.project_pattern) attr = self.__find_object(dn, LdapDriver.project_pattern)
return self.__to_project(attr) return self.__to_project(attr)
@sanitize
def get_users(self): def get_users(self):
"""Retrieve list of users""" """Retrieve list of users"""
attrs = self.__find_objects(FLAGS.ldap_user_subtree, attrs = self.__find_objects(FLAGS.ldap_user_subtree,
@ -134,6 +157,7 @@ class LdapDriver(object):
users.append(user) users.append(user)
return users return users
@sanitize
def get_projects(self, uid=None): def get_projects(self, uid=None):
"""Retrieve list of projects""" """Retrieve list of projects"""
pattern = LdapDriver.project_pattern pattern = LdapDriver.project_pattern
@ -143,6 +167,7 @@ class LdapDriver(object):
pattern) pattern)
return [self.__to_project(attr) for attr in attrs] return [self.__to_project(attr) for attr in attrs]
@sanitize
def create_user(self, name, access_key, secret_key, is_admin): def create_user(self, name, access_key, secret_key, is_admin):
"""Create a user""" """Create a user"""
if self.__user_exists(name): if self.__user_exists(name):
@ -196,6 +221,7 @@ class LdapDriver(object):
self.conn.add_s(self.__uid_to_dn(name), attr) self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr)) return self.__to_user(dict(attr))
@sanitize
def create_project(self, name, manager_uid, def create_project(self, name, manager_uid,
description=None, member_uids=None): description=None, member_uids=None):
"""Create a project""" """Create a project"""
@ -231,6 +257,7 @@ class LdapDriver(object):
self.conn.add_s(dn, attr) self.conn.add_s(dn, attr)
return self.__to_project(dict(attr)) return self.__to_project(dict(attr))
@sanitize
def modify_project(self, project_id, manager_uid=None, description=None): def modify_project(self, project_id, manager_uid=None, description=None):
"""Modify an existing project""" """Modify an existing project"""
if not manager_uid and not description: if not manager_uid and not description:
@ -249,21 +276,25 @@ class LdapDriver(object):
dn = self.__project_to_dn(project_id) dn = self.__project_to_dn(project_id)
self.conn.modify_s(dn, attr) self.conn.modify_s(dn, attr)
@sanitize
def add_to_project(self, uid, project_id): def add_to_project(self, uid, project_id):
"""Add user to project""" """Add user to project"""
dn = self.__project_to_dn(project_id) dn = self.__project_to_dn(project_id)
return self.__add_to_group(uid, dn) return self.__add_to_group(uid, dn)
@sanitize
def remove_from_project(self, uid, project_id): def remove_from_project(self, uid, project_id):
"""Remove user from project""" """Remove user from project"""
dn = self.__project_to_dn(project_id) dn = self.__project_to_dn(project_id)
return self.__remove_from_group(uid, dn) return self.__remove_from_group(uid, dn)
@sanitize
def is_in_project(self, uid, project_id): def is_in_project(self, uid, project_id):
"""Check if user is in project""" """Check if user is in project"""
dn = self.__project_to_dn(project_id) dn = self.__project_to_dn(project_id)
return self.__is_in_group(uid, dn) return self.__is_in_group(uid, dn)
@sanitize
def has_role(self, uid, role, project_id=None): def has_role(self, uid, role, project_id=None):
"""Check if user has role """Check if user has role
@ -273,6 +304,7 @@ class LdapDriver(object):
role_dn = self.__role_to_dn(role, project_id) role_dn = self.__role_to_dn(role, project_id)
return self.__is_in_group(uid, role_dn) return self.__is_in_group(uid, role_dn)
@sanitize
def add_role(self, uid, role, project_id=None): def add_role(self, uid, role, project_id=None):
"""Add role for user (or user and project)""" """Add role for user (or user and project)"""
role_dn = self.__role_to_dn(role, project_id) role_dn = self.__role_to_dn(role, project_id)
@ -283,11 +315,13 @@ class LdapDriver(object):
else: else:
return self.__add_to_group(uid, role_dn) return self.__add_to_group(uid, role_dn)
@sanitize
def remove_role(self, uid, role, project_id=None): def remove_role(self, uid, role, project_id=None):
"""Remove role for user (or user and project)""" """Remove role for user (or user and project)"""
role_dn = self.__role_to_dn(role, project_id) role_dn = self.__role_to_dn(role, project_id)
return self.__remove_from_group(uid, role_dn) return self.__remove_from_group(uid, role_dn)
@sanitize
def get_user_roles(self, uid, project_id=None): def get_user_roles(self, uid, project_id=None):
"""Retrieve list of roles for user (or user and project)""" """Retrieve list of roles for user (or user and project)"""
if project_id is None: if project_id is None:
@ -307,6 +341,7 @@ class LdapDriver(object):
roles = self.__find_objects(project_dn, query) roles = self.__find_objects(project_dn, query)
return [role['cn'][0] for role in roles] return [role['cn'][0] for role in roles]
@sanitize
def delete_user(self, uid): def delete_user(self, uid):
"""Delete a user""" """Delete a user"""
if not self.__user_exists(uid): if not self.__user_exists(uid):
@ -332,12 +367,14 @@ class LdapDriver(object):
# Delete entry # Delete entry
self.conn.delete_s(self.__uid_to_dn(uid)) self.conn.delete_s(self.__uid_to_dn(uid))
@sanitize
def delete_project(self, project_id): def delete_project(self, project_id):
"""Delete a project""" """Delete a project"""
project_dn = self.__project_to_dn(project_id) project_dn = self.__project_to_dn(project_id)
self.__delete_roles(project_dn) self.__delete_roles(project_dn)
self.__delete_group(project_dn) self.__delete_group(project_dn)
@sanitize
def modify_user(self, uid, access_key=None, secret_key=None, admin=None): def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
"""Modify an existing user""" """Modify an existing user"""
if not access_key and not secret_key and admin is None: if not access_key and not secret_key and admin is None:
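
The @sanitize decorator sprinkled through this driver coerces unicode arguments to plain str before they reach the LDAP calls; a self-contained sketch of the same pattern (the Driver class here is a dummy, not the real LdapDriver):

    def _clean(attr):
        """Coerce unicode to str, leave everything else untouched."""
        if attr is None:
            return None
        if type(attr) is unicode:
            return str(attr)
        return attr

    def sanitize(fn):
        """Decorator that cleans every positional and keyword argument."""
        def _wrapped(self, *args, **kwargs):
            args = [_clean(x) for x in args]
            kwargs = dict((k, _clean(v)) for (k, v) in kwargs.iteritems())
            return fn(self, *args, **kwargs)
        _wrapped.func_name = fn.func_name
        return _wrapped

    class Driver(object):
        @sanitize
        def get_user(self, uid):
            return type(uid)

    print Driver().get_user(u'admin')    # <type 'str'>, not <type 'unicode'>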


@ -10,7 +10,6 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s
export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
export CLOUD_SERVERS_API_KEY="%(access)s" export NOVA_API_KEY="%(access)s"
export CLOUD_SERVERS_USERNAME="%(user)s" export NOVA_USERNAME="%(user)s"
export CLOUD_SERVERS_URL="%(os)s" export NOVA_URL="%(os)s"


@ -67,10 +67,10 @@ class API(base.Base):
"""Get the network topic for an instance.""" """Get the network topic for an instance."""
try: try:
instance = self.get(context, instance_id) instance = self.get(context, instance_id)
except exception.NotFound as e: except exception.NotFound:
LOG.warning(_("Instance %d was not found in get_network_topic"), LOG.warning(_("Instance %d was not found in get_network_topic"),
instance_id) instance_id)
raise e raise
host = instance['host'] host = instance['host']
if not host: if not host:
@ -85,10 +85,11 @@ class API(base.Base):
min_count=1, max_count=1, min_count=1, max_count=1,
display_name='', display_description='', display_name='', display_description='',
key_name=None, key_data=None, security_group='default', key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None): availability_zone=None, user_data=None,
onset_files=None):
"""Create the number of instances requested if quota and """Create the number of instances requested if quota and
other arguments check out ok.""" other arguments check out ok.
"""
type_data = instance_types.INSTANCE_TYPES[instance_type] type_data = instance_types.INSTANCE_TYPES[instance_type]
num_instances = quota.allowed_instances(context, max_count, type_data) num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count: if num_instances < min_count:
@ -99,25 +100,23 @@ class API(base.Base):
"run %s more instances of this type.") % "run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded") num_instances, "InstanceLimitExceeded")
is_vpn = image_id == FLAGS.vpn_image_id image = self.image_service.show(context, image_id)
if not is_vpn: if kernel_id is None:
image = self.image_service.show(context, image_id) kernel_id = image.get('kernel_id', None)
if kernel_id is None: if ramdisk_id is None:
kernel_id = image.get('kernel_id', None) ramdisk_id = image.get('ramdisk_id', None)
if ramdisk_id is None: # No kernel and ramdisk for raw images
ramdisk_id = image.get('ramdisk_id', None) if kernel_id == str(FLAGS.null_kernel):
# No kernel and ramdisk for raw images kernel_id = None
if kernel_id == str(FLAGS.null_kernel): ramdisk_id = None
kernel_id = None LOG.debug(_("Creating a raw instance"))
ramdisk_id = None # Make sure we have access to kernel and ramdisk (if not raw)
LOG.debug(_("Creating a raw instance")) logging.debug("Using Kernel=%s, Ramdisk=%s" %
# Make sure we have access to kernel and ramdisk (if not raw) (kernel_id, ramdisk_id))
logging.debug("Using Kernel=%s, Ramdisk=%s" % if kernel_id:
(kernel_id, ramdisk_id)) self.image_service.show(context, kernel_id)
if kernel_id: if ramdisk_id:
self.image_service.show(context, kernel_id) self.image_service.show(context, ramdisk_id)
if ramdisk_id:
self.image_service.show(context, ramdisk_id)
if security_group is None: if security_group is None:
security_group = ['default'] security_group = ['default']
@ -156,7 +155,6 @@ class API(base.Base):
'key_data': key_data, 'key_data': key_data,
'locked': False, 'locked': False,
'availability_zone': availability_zone} 'availability_zone': availability_zone}
elevated = context.elevated() elevated = context.elevated()
instances = [] instances = []
LOG.debug(_("Going to run %s instances..."), num_instances) LOG.debug(_("Going to run %s instances..."), num_instances)
@ -193,7 +191,8 @@ class API(base.Base):
{"method": "run_instance", {"method": "run_instance",
"args": {"topic": FLAGS.compute_topic, "args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id, "instance_id": instance_id,
"availability_zone": availability_zone}}) "availability_zone": availability_zone,
"onset_files": onset_files}})
for group_id in security_groups: for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id) self.trigger_security_group_members_refresh(elevated, group_id)
@ -293,10 +292,10 @@ class API(base.Base):
LOG.debug(_("Going to try to terminate %s"), instance_id) LOG.debug(_("Going to try to terminate %s"), instance_id)
try: try:
instance = self.get(context, instance_id) instance = self.get(context, instance_id)
except exception.NotFound as e: except exception.NotFound:
LOG.warning(_("Instance %d was not found during terminate"), LOG.warning(_("Instance %d was not found during terminate"),
instance_id) instance_id)
raise e raise
if (instance['state_description'] == 'terminating'): if (instance['state_description'] == 'terminating'):
LOG.warning(_("Instance %d is already being terminated"), LOG.warning(_("Instance %d is already being terminated"),
@ -434,6 +433,10 @@ class API(base.Base):
"""Set the root/admin password for the given instance.""" """Set the root/admin password for the given instance."""
self._cast_compute_message('set_admin_password', context, instance_id) self._cast_compute_message('set_admin_password', context, instance_id)
def inject_file(self, context, instance_id):
"""Write a file to the given instance."""
self._cast_compute_message('inject_file', context, instance_id)
def get_ajax_console(self, context, instance_id): def get_ajax_console(self, context, instance_id):
"""Get a url to an AJAX Console""" """Get a url to an AJAX Console"""
instance = self.get(context, instance_id) instance = self.get(context, instance_id)
@ -466,6 +469,13 @@ class API(base.Base):
instance = self.get(context, instance_id) instance = self.get(context, instance_id)
return instance['locked'] return instance['locked']
def reset_network(self, context, instance_id):
"""
Reset networking on the instance.
"""
self._cast_compute_message('reset_network', context, instance_id)
def attach_volume(self, context, instance_id, volume_id, device): def attach_volume(self, context, instance_id, volume_id, device):
if not re.match("^/dev/[a-z]d[a-z]+$", device): if not re.match("^/dev/[a-z]d[a-z]+$", device):
raise exception.ApiError(_("Invalid device specified: %s. " raise exception.ApiError(_("Invalid device specified: %s. "


@ -34,6 +34,7 @@ terminating it.
:func:`nova.utils.import_object` :func:`nova.utils.import_object`
""" """
import base64
import datetime import datetime
import random import random
import string import string
@ -127,10 +128,10 @@ class ComputeManager(manager.Manager):
info = self.driver.get_info(instance_ref['name']) info = self.driver.get_info(instance_ref['name'])
state = info['state'] state = info['state']
except exception.NotFound: except exception.NotFound:
state = power_state.NOSTATE state = power_state.FAILED
self.db.instance_set_state(context, instance_id, state) self.db.instance_set_state(context, instance_id, state)
def get_console_topic(self, context, **_kwargs): def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host """Retrieves the console host for a project on this host
Currently this is just set in the flags for each compute Currently this is just set in the flags for each compute
host.""" host."""
@ -139,7 +140,7 @@ class ComputeManager(manager.Manager):
FLAGS.console_topic, FLAGS.console_topic,
FLAGS.console_host) FLAGS.console_host)
def get_network_topic(self, context, **_kwargs): def get_network_topic(self, context, **kwargs):
"""Retrieves the network host for a project on this host""" """Retrieves the network host for a project on this host"""
# TODO(vish): This method should be memoized. This will make # TODO(vish): This method should be memoized. This will make
# the call to get_network_host cheaper, so that # the call to get_network_host cheaper, so that
@ -158,21 +159,22 @@ class ComputeManager(manager.Manager):
@exception.wrap_exception @exception.wrap_exception
def refresh_security_group_rules(self, context, def refresh_security_group_rules(self, context,
security_group_id, **_kwargs): security_group_id, **kwargs):
"""This call passes straight through to the virtualization driver.""" """This call passes straight through to the virtualization driver."""
return self.driver.refresh_security_group_rules(security_group_id) return self.driver.refresh_security_group_rules(security_group_id)
@exception.wrap_exception @exception.wrap_exception
def refresh_security_group_members(self, context, def refresh_security_group_members(self, context,
security_group_id, **_kwargs): security_group_id, **kwargs):
"""This call passes straight through to the virtualization driver.""" """This call passes straight through to the virtualization driver."""
return self.driver.refresh_security_group_members(security_group_id) return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception @exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs): def run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options.""" """Launch a new instance with specified options."""
context = context.elevated() context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id) instance_ref = self.db.instance_get(context, instance_id)
instance_ref.onset_files = kwargs.get('onset_files', [])
if instance_ref['name'] in self.driver.list_instances(): if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created")) raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id, LOG.audit(_("instance %s: starting..."), instance_id,
@ -323,28 +325,43 @@ class ComputeManager(manager.Manager):
"""Set the root/admin password for an instance on this server.""" """Set the root/admin password for an instance on this server."""
context = context.elevated() context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id) instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['state'] != power_state.RUNNING: instance_id = instance_ref['id']
logging.warn('trying to reset the password on a non-running ' instance_state = instance_ref['state']
'instance: %s (state: %s expected: %s)', expected_state = power_state.RUNNING
instance_ref['id'], if instance_state != expected_state:
instance_ref['state'], LOG.warn(_('trying to reset the password on a non-running '
power_state.RUNNING) 'instance: %(instance_id)s (state: %(instance_state)s '
'expected: %(expected_state)s)') % locals())
logging.debug('instance %s: setting admin password', LOG.audit(_('instance %s: setting admin password'),
instance_ref['name']) instance_ref['name'])
if new_pass is None: if new_pass is None:
# Generate a random password # Generate a random password
new_pass = self._generate_password(FLAGS.password_length) new_pass = utils.generate_password(FLAGS.password_length)
self.driver.set_admin_password(instance_ref, new_pass) self.driver.set_admin_password(instance_ref, new_pass)
self._update_state(context, instance_id) self._update_state(context, instance_id)
def _generate_password(self, length=20): @exception.wrap_exception
"""Generate a random sequence of letters and digits @checks_instance_lock
to be used as a password. def inject_file(self, context, instance_id, path, file_contents):
""" """Write a file to the specified path on an instance on this server"""
chrs = string.letters + string.digits context = context.elevated()
return "".join([random.choice(chrs) for i in xrange(length)]) instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref['id']
instance_state = instance_ref['state']
expected_state = power_state.RUNNING
if instance_state != expected_state:
LOG.warn(_('trying to inject a file into a non-running '
'instance: %(instance_id)s (state: %(instance_state)s '
'expected: %(expected_state)s)') % locals())
# Files/paths *should* be base64-encoded at this point, but
# double-check to make sure.
b64_path = utils.ensure_b64_encoding(path)
b64_contents = utils.ensure_b64_encoding(file_contents)
plain_path = base64.b64decode(b64_path)
nm = instance_ref['name']
msg = _('instance %(nm)s: injecting file to %(plain_path)s') % locals()
LOG.audit(msg)
self.driver.inject_file(instance_ref, b64_path, b64_contents)
@exception.wrap_exception @exception.wrap_exception
@checks_instance_lock @checks_instance_lock
@ -498,6 +515,18 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id) instance_ref = self.db.instance_get(context, instance_id)
return instance_ref['locked'] return instance_ref['locked']
@checks_instance_lock
def reset_network(self, context, instance_id):
"""
Reset networking on the instance.
"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: reset network'), instance_id,
context=context)
self.driver.reset_network(instance_ref)
@exception.wrap_exception @exception.wrap_exception
def get_console_output(self, context, instance_id): def get_console_output(self, context, instance_id):
"""Send the console output for an instance.""" """Send the console output for an instance."""
@ -511,7 +540,7 @@ class ComputeManager(manager.Manager):
def get_ajax_console(self, context, instance_id): def get_ajax_console(self, context, instance_id):
"""Return connection information for an ajax console""" """Return connection information for an ajax console"""
context = context.elevated() context = context.elevated()
logging.debug(_("instance %s: getting ajax console"), instance_id) LOG.debug(_("instance %s: getting ajax console"), instance_id)
instance_ref = self.db.instance_get(context, instance_id) instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_ajax_console(instance_ref) return self.driver.get_ajax_console(instance_ref)
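
set_admin_password now calls utils.generate_password() instead of the private helper removed above; judging from the deleted lines, the behaviour is a random run of letters and digits, which can be sketched as follows (the placement in nova.utils is assumed, not shown in this hunk):

    import random
    import string

    def generate_password(length=20):
        """Return a random string of letters and digits (sketch)."""
        chrs = string.letters + string.digits
        return "".join([random.choice(chrs) for i in xrange(length)])

    print generate_password(12)    # e.g. 'aZ3k9QmP0xLs'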


@ -27,6 +27,7 @@ SHUTDOWN = 0x04
SHUTOFF = 0x05 SHUTOFF = 0x05
CRASHED = 0x06 CRASHED = 0x06
SUSPENDED = 0x07 SUSPENDED = 0x07
FAILED = 0x08
def name(code): def name(code):
@ -38,5 +39,6 @@ def name(code):
SHUTDOWN: 'shutdown', SHUTDOWN: 'shutdown',
SHUTOFF: 'shutdown', SHUTOFF: 'shutdown',
CRASHED: 'crashed', CRASHED: 'crashed',
SUSPENDED: 'suspended'} SUSPENDED: 'suspended',
FAILED: 'failed to spawn'}
return d[code] return d[code]


@ -28,7 +28,6 @@ from nova import utils
class RequestContext(object): class RequestContext(object):
def __init__(self, user, project, is_admin=None, read_deleted=False, def __init__(self, user, project, is_admin=None, read_deleted=False,
remote_address=None, timestamp=None, request_id=None): remote_address=None, timestamp=None, request_id=None):
if hasattr(user, 'id'): if hasattr(user, 'id'):
@ -53,7 +52,7 @@ class RequestContext(object):
self.read_deleted = read_deleted self.read_deleted = read_deleted
self.remote_address = remote_address self.remote_address = remote_address
if not timestamp: if not timestamp:
timestamp = datetime.datetime.utcnow() timestamp = utils.utcnow()
if isinstance(timestamp, str) or isinstance(timestamp, unicode): if isinstance(timestamp, str) or isinstance(timestamp, unicode):
timestamp = utils.parse_isotime(timestamp) timestamp = utils.parse_isotime(timestamp)
self.timestamp = timestamp self.timestamp = timestamp
@ -101,7 +100,7 @@ class RequestContext(object):
return cls(**values) return cls(**values)
def elevated(self, read_deleted=False): def elevated(self, read_deleted=False):
"""Return a version of this context with admin flag set""" """Return a version of this context with admin flag set."""
return RequestContext(self.user_id, return RequestContext(self.user_id,
self.project_id, self.project_id,
True, True,


@ -288,11 +288,21 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time):
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time) return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
def fixed_ip_get_all(context):
"""Get all defined fixed ips."""
return IMPL.fixed_ip_get_all(context)
def fixed_ip_get_by_address(context, address): def fixed_ip_get_by_address(context, address):
"""Get a fixed ip by address or raise if it does not exist.""" """Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address) return IMPL.fixed_ip_get_by_address(context, address)
def fixed_ip_get_all_by_instance(context, instance_id):
"""Get fixed ips by instance or raise if none exist."""
return IMPL.fixed_ip_get_all_by_instance(context, instance_id)
def fixed_ip_get_instance(context, address): def fixed_ip_get_instance(context, address):
"""Get an instance for a fixed ip by address.""" """Get an instance for a fixed ip by address."""
return IMPL.fixed_ip_get_instance(context, address) return IMPL.fixed_ip_get_instance(context, address)
@ -500,6 +510,11 @@ def network_get(context, network_id):
return IMPL.network_get(context, network_id) return IMPL.network_get(context, network_id)
def network_get_all(context):
"""Return all defined networks."""
return IMPL.network_get_all(context)
# pylint: disable-msg=C0103 # pylint: disable-msg=C0103
def network_get_associated_fixed_ips(context, network_id): def network_get_associated_fixed_ips(context, network_id):
"""Get all network's ips that have been associated.""" """Get all network's ips that have been associated."""
@ -516,6 +531,11 @@ def network_get_by_instance(context, instance_id):
return IMPL.network_get_by_instance(context, instance_id) return IMPL.network_get_by_instance(context, instance_id)
def network_get_all_by_instance(context, instance_id):
"""Get all networks by instance id or raise if none exist."""
return IMPL.network_get_all_by_instance(context, instance_id)
def network_get_index(context, network_id): def network_get_index(context, network_id):
"""Get non-conflicting index for network.""" """Get non-conflicting index for network."""
return IMPL.network_get_index(context, network_id) return IMPL.network_get_index(context, network_id)
@ -556,7 +576,7 @@ def project_get_network(context, project_id, associate=True):
""" """
return IMPL.project_get_network(context, project_id) return IMPL.project_get_network(context, project_id, associate)
def project_get_network_v6(context, project_id): def project_get_network_v6(context, project_id):
@ -980,3 +1000,31 @@ def console_get_all_by_instance(context, instance_id):
def console_get(context, console_id, instance_id=None): def console_get(context, console_id, instance_id=None):
"""Get a specific console (possibly on a given instance).""" """Get a specific console (possibly on a given instance)."""
return IMPL.console_get(context, console_id, instance_id) return IMPL.console_get(context, console_id, instance_id)
####################
def zone_create(context, values):
"""Create a new child Zone entry."""
return IMPL.zone_create(context, values)
def zone_update(context, zone_id, values):
"""Update a child Zone entry."""
return IMPL.zone_update(context, values)
def zone_delete(context, zone_id):
"""Delete a child Zone."""
return IMPL.zone_delete(context, zone_id)
def zone_get(context, zone_id):
"""Get a specific child Zone."""
return IMPL.zone_get(context, zone_id)
def zone_get_all(context):
"""Get all child Zones."""
return IMPL.zone_get_all(context)


@ -583,6 +583,17 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
return result.rowcount return result.rowcount
@require_admin_context
def fixed_ip_get_all(context, session=None):
if not session:
session = get_session()
result = session.query(models.FixedIp).all()
if not result:
raise exception.NotFound(_('No fixed ips defined'))
return result
@require_context @require_context
def fixed_ip_get_by_address(context, address, session=None): def fixed_ip_get_by_address(context, address, session=None):
if not session: if not session:
@ -608,6 +619,17 @@ def fixed_ip_get_instance(context, address):
return fixed_ip_ref.instance return fixed_ip_ref.instance
@require_context
def fixed_ip_get_all_by_instance(context, instance_id):
session = get_session()
rv = session.query(models.FixedIp).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False)
if not rv:
raise exception.NotFound(_('No address for instance %s') % instance_id)
return rv
@require_context @require_context
def fixed_ip_get_instance_v6(context, address): def fixed_ip_get_instance_v6(context, address):
session = get_session() session = get_session()
@ -1056,6 +1078,15 @@ def network_get(context, network_id, session=None):
return result return result
@require_admin_context
def network_get_all(context):
session = get_session()
result = session.query(models.Network)
if not result:
raise exception.NotFound(_('No networks defined'))
return result
# NOTE(vish): pylint complains because of the long method name, but # NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods # it fits with the names of the rest of the methods
# pylint: disable-msg=C0103 # pylint: disable-msg=C0103
@ -1099,6 +1130,19 @@ def network_get_by_instance(_context, instance_id):
return rv return rv
@require_admin_context
def network_get_all_by_instance(_context, instance_id):
session = get_session()
rv = session.query(models.Network).\
filter_by(deleted=False).\
join(models.Network.fixed_ips).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False)
if not rv:
raise exception.NotFound(_('No network for instance %s') % instance_id)
return rv
@require_admin_context @require_admin_context
def network_set_host(context, network_id, host_id): def network_set_host(context, network_id, host_id):
session = get_session() session = get_session()
@ -2014,3 +2058,47 @@ def console_get(context, console_id, instance_id=None):
raise exception.NotFound(_("No console with id %(console_id)s" raise exception.NotFound(_("No console with id %(console_id)s"
" %(idesc)s") % locals()) " %(idesc)s") % locals())
return result return result
####################
@require_admin_context
def zone_create(context, values):
zone = models.Zone()
zone.update(values)
zone.save()
return zone
@require_admin_context
def zone_update(context, zone_id, values):
session = get_session()
zone = session.query(models.Zone).filter_by(id=zone_id).first()
if not zone:
raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
zone.update(values)
zone.save()
return zone
@require_admin_context
def zone_delete(context, zone_id):
session = get_session()
with session.begin():
session.execute('delete from zones '
'where id=:id', {'id': zone_id})
@require_admin_context
def zone_get(context, zone_id):
session = get_session()
result = session.query(models.Zone).filter_by(id=zone_id).first()
if not result:
raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
return result
@require_admin_context
def zone_get_all(context):
session = get_session()
return session.query(models.Zone).all()
@ -508,17 +508,19 @@ def upgrade(migrate_engine):
# bind migrate_engine to your metadata # bind migrate_engine to your metadata
meta.bind = migrate_engine meta.bind = migrate_engine
for table in (auth_tokens, export_devices, fixed_ips, floating_ips, tables = [auth_tokens,
instances, key_pairs, networks, instances, key_pairs, networks, fixed_ips, floating_ips,
projects, quotas, security_groups, security_group_inst_assoc, quotas, security_groups, security_group_inst_assoc,
security_group_rules, services, users, security_group_rules, services, users, projects,
user_project_association, user_project_role_association, user_project_association, user_project_role_association,
user_role_association, volumes): user_role_association, volumes, export_devices]
for table in tables:
try: try:
table.create() table.create()
except Exception: except Exception:
logging.info(repr(table)) logging.info(repr(table))
logging.exception('Exception while creating table') logging.exception('Exception while creating table')
meta.drop_all(tables=tables)
raise raise
@ -209,13 +209,16 @@ def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; # Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata # bind migrate_engine to your metadata
meta.bind = migrate_engine meta.bind = migrate_engine
for table in (certificates, consoles, console_pools, instance_actions,
iscsi_targets): tables = [certificates, console_pools, consoles, instance_actions,
iscsi_targets]
for table in tables:
try: try:
table.create() table.create()
except Exception: except Exception:
logging.info(repr(table)) logging.info(repr(table))
logging.exception('Exception while creating table') logging.exception('Exception while creating table')
meta.drop_all(tables=tables)
raise raise
auth_tokens.c.user_id.alter(type=String(length=255, auth_tokens.c.user_id.alter(type=String(length=255,
@ -0,0 +1,51 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from nova import log as logging
meta = MetaData()
networks = Table('networks', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
#
# Tables to alter
#
networks_label = Column(
'label',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
networks.create_column(networks_label)
@ -0,0 +1,61 @@
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from nova import log as logging
meta = MetaData()
#
# New Tables
#
zones = Table('zones', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('api_url',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('username',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('password',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
#
# Tables to alter
#
# (none currently)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
for table in (zones, ):
try:
table.create()
except Exception:
logging.info(repr(table))
@ -17,12 +17,22 @@
# under the License. # under the License.
import os import os
import sys
from nova import flags from nova import flags
import sqlalchemy import sqlalchemy
from migrate.versioning import api as versioning_api from migrate.versioning import api as versioning_api
from migrate.versioning import exceptions as versioning_exceptions
try:
from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
try:
# python-migrate changed location of exceptions after 1.6.3
# See LP Bug #717467
from migrate import exceptions as versioning_exceptions
except ImportError:
sys.exit(_("python-migrate is not installed. Exiting."))
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
@ -45,8 +55,8 @@ def db_version():
engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False) engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
meta.reflect(bind=engine) meta.reflect(bind=engine)
try: try:
for table in ('auth_tokens', 'export_devices', 'fixed_ips', for table in ('auth_tokens', 'zones', 'export_devices',
'floating_ips', 'instances', 'fixed_ips', 'floating_ips', 'instances',
'key_pairs', 'networks', 'projects', 'quotas', 'key_pairs', 'networks', 'projects', 'quotas',
'security_group_instance_association', 'security_group_instance_association',
'security_group_rules', 'security_groups', 'security_group_rules', 'security_groups',
@ -373,6 +373,7 @@ class Network(BASE, NovaBase):
"vpn_public_port"), "vpn_public_port"),
{'mysql_engine': 'InnoDB'}) {'mysql_engine': 'InnoDB'})
id = Column(Integer, primary_key=True) id = Column(Integer, primary_key=True)
label = Column(String(255))
injected = Column(Boolean, default=False) injected = Column(Boolean, default=False)
cidr = Column(String(255), unique=True) cidr = Column(String(255), unique=True)
@ -535,6 +536,15 @@ class Console(BASE, NovaBase):
pool = relationship(ConsolePool, backref=backref('consoles')) pool = relationship(ConsolePool, backref=backref('consoles'))
class Zone(BASE, NovaBase):
"""Represents a child zone of this zone."""
__tablename__ = 'zones'
id = Column(Integer, primary_key=True)
api_url = Column(String(255))
username = Column(String(255))
password = Column(String(255))
def register_models(): def register_models():
"""Register Models and create metadata. """Register Models and create metadata.
@ -547,7 +557,7 @@ def register_models():
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule, Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User, SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console) # , Image, Host Project, Certificate, ConsolePool, Console, Zone)
engine = create_engine(FLAGS.sql_connection, echo=False) engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models: for model in models:
model.metadata.create_all(engine) model.metadata.create_all(engine)
@ -282,6 +282,8 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
"Top-level directory for maintaining nova's state") "Top-level directory for maintaining nova's state")
DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory')
DEFINE_string('sql_connection', DEFINE_string('sql_connection',
'sqlite:///$state_path/nova.sqlite', 'sqlite:///$state_path/nova.sqlite',
@ -28,9 +28,11 @@ It also allows setting of formatting information through flags.
import cStringIO import cStringIO
import inspect
import json import json
import logging import logging
import logging.handlers import logging.handlers
import os
import sys import sys
import traceback import traceback
@ -92,7 +94,7 @@ critical = logging.critical
log = logging.log log = logging.log
# handlers # handlers
StreamHandler = logging.StreamHandler StreamHandler = logging.StreamHandler
FileHandler = logging.FileHandler WatchedFileHandler = logging.handlers.WatchedFileHandler
# logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler. # logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler.
SysLogHandler = logging.handlers.SysLogHandler SysLogHandler = logging.handlers.SysLogHandler
@ -111,6 +113,18 @@ def _dictify_context(context):
return context return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def get_log_file_path(binary=None):
if FLAGS.logfile:
return FLAGS.logfile
if FLAGS.logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(FLAGS.logdir, binary),)
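# Standalone sketch of the precedence implemented above (paths are just
# examples): logfile wins over logdir, and None means no file handler is
# added by basicConfig().
import os


def resolve_log_path(logfile, logdir, binary):
    if logfile:
        return logfile
    if logdir:
        return '%s.log' % os.path.join(logdir, binary)
    return None

assert resolve_log_path('/some/path/foo-bar.log', None, 'foo-bar') == \
    '/some/path/foo-bar.log'
assert resolve_log_path(None, '/some/path', 'foo-bar') == \
    '/some/path/foo-bar.log'
assert resolve_log_path(None, None, 'foo-bar') is None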
def basicConfig(): def basicConfig():
logging.basicConfig() logging.basicConfig()
for handler in logging.root.handlers: for handler in logging.root.handlers:
@ -123,8 +137,9 @@ def basicConfig():
syslog = SysLogHandler(address='/dev/log') syslog = SysLogHandler(address='/dev/log')
syslog.setFormatter(_formatter) syslog.setFormatter(_formatter)
logging.root.addHandler(syslog) logging.root.addHandler(syslog)
if FLAGS.logfile: logpath = get_log_file_path()
logfile = FileHandler(FLAGS.logfile) if logpath:
logfile = WatchedFileHandler(logpath)
logfile.setFormatter(_formatter) logfile.setFormatter(_formatter)
logging.root.addHandler(logfile) logging.root.addHandler(logfile)
@ -44,7 +44,7 @@ flags.DEFINE_string('dhcp_domain',
flags.DEFINE_string('networks_path', '$state_path/networks', flags.DEFINE_string('networks_path', '$state_path/networks',
'Location to keep network config files') 'Location to keep network config files')
flags.DEFINE_string('public_interface', 'vlan1', flags.DEFINE_string('public_interface', 'eth0',
'Interface for public IP addresses') 'Interface for public IP addresses')
flags.DEFINE_string('vlan_interface', 'eth0', flags.DEFINE_string('vlan_interface', 'eth0',
'network device for vlans') 'network device for vlans')
@ -110,6 +110,7 @@ class NetworkManager(manager.Manager):
This class must be subclassed to support specific topologies. This class must be subclassed to support specific topologies.
""" """
timeout_fixed_ips = True
def __init__(self, network_driver=None, *args, **kwargs): def __init__(self, network_driver=None, *args, **kwargs):
if not network_driver: if not network_driver:
@ -138,6 +139,19 @@ class NetworkManager(manager.Manager):
self.driver.ensure_floating_forward(floating_ip['address'], self.driver.ensure_floating_forward(floating_ip['address'],
fixed_address) fixed_address)
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""
super(NetworkManager, self).periodic_tasks(context)
if self.timeout_fixed_ips:
now = utils.utcnow()
timeout = FLAGS.fixed_ip_disassociate_timeout
time = now - datetime.timedelta(seconds=timeout)
num = self.db.fixed_ip_disassociate_all_by_timeout(context,
self.host,
time)
if num:
LOG.debug(_("Disassociated %s stale fixed ip(s)"), num)
def set_network_host(self, context, network_id): def set_network_host(self, context, network_id):
"""Safely sets the host of the network.""" """Safely sets the host of the network."""
LOG.debug(_("setting network host"), context=context) LOG.debug(_("setting network host"), context=context)
@ -306,6 +320,7 @@ class FlatManager(NetworkManager):
not do any setup in this mode, it must be done manually. Requests to not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server. 169.254.169.254 port 80 will need to be forwarded to the api server.
""" """
timeout_fixed_ips = False
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool.""" """Gets a fixed ip from the pool."""
@ -331,11 +346,12 @@ class FlatManager(NetworkManager):
pass pass
def create_networks(self, context, cidr, num_networks, network_size, def create_networks(self, context, cidr, num_networks, network_size,
cidr_v6, *args, **kwargs): cidr_v6, label, *args, **kwargs):
"""Create networks based on parameters.""" """Create networks based on parameters."""
fixed_net = IPy.IP(cidr) fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6) fixed_net_v6 = IPy.IP(cidr_v6)
significant_bits_v6 = 64 significant_bits_v6 = 64
count = 1
for index in range(num_networks): for index in range(num_networks):
start = index * network_size start = index * network_size
significant_bits = 32 - int(math.log(network_size, 2)) significant_bits = 32 - int(math.log(network_size, 2))
@ -348,6 +364,11 @@ class FlatManager(NetworkManager):
net['gateway'] = str(project_net[1]) net['gateway'] = str(project_net[1])
net['broadcast'] = str(project_net.broadcast()) net['broadcast'] = str(project_net.broadcast())
net['dhcp_start'] = str(project_net[2]) net['dhcp_start'] = str(project_net[2])
if num_networks > 1:
net['label'] = "%s_%d" % (label, count)
else:
net['label'] = label
count += 1
if(FLAGS.use_ipv6): if(FLAGS.use_ipv6):
cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6) cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)
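# Illustrative sketch of the labeling rule above (inputs invented): a single
# network keeps the bare label, multiple networks get a 1-based suffix.
def label_for(label, index, num_networks):
    if num_networks > 1:
        return "%s_%d" % (label, index)
    return label

assert label_for('public', 1, 1) == 'public'
assert [label_for('public', i, 3) for i in (1, 2, 3)] == \
    ['public_1', 'public_2', 'public_3']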
@ -451,18 +472,6 @@ class VlanManager(NetworkManager):
instances in its subnet. instances in its subnet.
""" """
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""
super(VlanManager, self).periodic_tasks(context)
now = datetime.datetime.utcnow()
timeout = FLAGS.fixed_ip_disassociate_timeout
time = now - datetime.timedelta(seconds=timeout)
num = self.db.fixed_ip_disassociate_all_by_timeout(context,
self.host,
time)
if num:
LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num)
def init_host(self): def init_host(self):
"""Do any initialization that needs to be run if this is a """Do any initialization that needs to be run if this is a
standalone service. standalone service.
@ -503,8 +512,14 @@ class VlanManager(NetworkManager):
network_ref['bridge']) network_ref['bridge'])
def create_networks(self, context, cidr, num_networks, network_size, def create_networks(self, context, cidr, num_networks, network_size,
cidr_v6, vlan_start, vpn_start): cidr_v6, vlan_start, vpn_start, **kwargs):
"""Create networks based on parameters.""" """Create networks based on parameters."""
# Check that num_networks + vlan_start is not > 4094, fixes lp708025
if num_networks + vlan_start > 4094:
raise ValueError(_('The sum of num_networks and vlan_start cannot'
' be greater than 4094'))
fixed_net = IPy.IP(cidr) fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6) fixed_net_v6 = IPy.IP(cidr_v6)
network_size_v6 = 1 << 64 network_size_v6 = 1 << 64
@ -107,7 +107,7 @@ class Bucket(object):
def is_authorized(self, context): def is_authorized(self, context):
try: try:
return context.user.is_admin() or \ return context.is_admin or \
self.owner_id == context.project_id self.owner_id == context.project_id
except Exception, e: except Exception, e:
return False return False
@ -69,7 +69,7 @@ class Image(object):
# but only modified by admin or owner. # but only modified by admin or owner.
try: try:
return (self.metadata['isPublic'] and readonly) or \ return (self.metadata['isPublic'] and readonly) or \
context.user.is_admin() or \ context.is_admin or \
self.metadata['imageOwnerId'] == context.project_id self.metadata['imageOwnerId'] == context.project_id
except: except:
return False return False
@ -29,6 +29,7 @@ import uuid
from carrot import connection as carrot_connection from carrot import connection as carrot_connection
from carrot import messaging from carrot import messaging
from eventlet import greenpool
from eventlet import greenthread from eventlet import greenthread
from nova import context from nova import context
@ -42,6 +43,8 @@ from nova import utils
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.rpc') LOG = logging.getLogger('nova.rpc')
flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool')
class Connection(carrot_connection.BrokerConnection): class Connection(carrot_connection.BrokerConnection):
"""Connection instance object""" """Connection instance object"""
@ -155,11 +158,15 @@ class AdapterConsumer(TopicConsumer):
def __init__(self, connection=None, topic="broadcast", proxy=None): def __init__(self, connection=None, topic="broadcast", proxy=None):
LOG.debug(_('Initing the Adapter Consumer for %s') % topic) LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
self.proxy = proxy self.proxy = proxy
self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
super(AdapterConsumer, self).__init__(connection=connection, super(AdapterConsumer, self).__init__(connection=connection,
topic=topic) topic=topic)
def receive(self, *args, **kwargs):
self.pool.spawn_n(self._receive, *args, **kwargs)
@exception.wrap_exception @exception.wrap_exception
def receive(self, message_data, message): def _receive(self, message_data, message):
"""Magically looks for a method on the proxy object and calls it """Magically looks for a method on the proxy object and calls it
Message data should be a dictionary with two keys: Message data should be a dictionary with two keys:
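# Standalone sketch of the dispatch pattern above: a bounded eventlet pool
# (cf. FLAGS.rpc_thread_pool_size) keeps a slow handler from blocking the
# consumer loop. This is not the AdapterConsumer itself.
import eventlet
from eventlet import greenpool

pool = greenpool.GreenPool(4)


def handle(msg):
    eventlet.sleep(0.1)          # stands in for a slow proxy method
    return msg

for i in range(10):
    pool.spawn_n(handle, i)      # returns immediately, like receive()
pool.waitall()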
@ -92,31 +92,3 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
self.assertEqual(middleware.limiter.__class__.__name__, "Limiter") self.assertEqual(middleware.limiter.__class__.__name__, "Limiter")
middleware = RateLimitingMiddleware(simple_wsgi, service_host='foobar') middleware = RateLimitingMiddleware(simple_wsgi, service_host='foobar')
self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy") self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy")
class LimiterTest(unittest.TestCase):
def test_limiter(self):
items = range(2000)
req = Request.blank('/')
self.assertEqual(limited(items, req), items[:1000])
req = Request.blank('/?offset=0')
self.assertEqual(limited(items, req), items[:1000])
req = Request.blank('/?offset=3')
self.assertEqual(limited(items, req), items[3:1003])
req = Request.blank('/?offset=2005')
self.assertEqual(limited(items, req), [])
req = Request.blank('/?limit=10')
self.assertEqual(limited(items, req), items[:10])
req = Request.blank('/?limit=0')
self.assertEqual(limited(items, req), items[:1000])
req = Request.blank('/?limit=3000')
self.assertEqual(limited(items, req), items[:1000])
req = Request.blank('/?offset=1&limit=3')
self.assertEqual(limited(items, req), items[1:4])
req = Request.blank('/?offset=3&limit=0')
self.assertEqual(limited(items, req), items[3:1003])
req = Request.blank('/?offset=3&limit=1500')
self.assertEqual(limited(items, req), items[3:1003])
req = Request.blank('/?offset=3000&limit=10')
self.assertEqual(limited(items, req), [])
@ -0,0 +1,161 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import unittest
from webob import Request
from nova.api.openstack.common import limited
class LimiterTest(unittest.TestCase):
"""
Unit tests for the `nova.api.openstack.common.limited` method which takes
in a list of items and, depending on the 'offset' and 'limit' GET params,
returns a subset or complete set of the given items.
"""
def setUp(self):
"""
Run before each test.
"""
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
self.large = range(10000)
def test_limiter_offset_zero(self):
"""
Test offset key works with 0.
"""
req = Request.blank('/?offset=0')
self.assertEqual(limited(self.tiny, req), self.tiny)
self.assertEqual(limited(self.small, req), self.small)
self.assertEqual(limited(self.medium, req), self.medium)
self.assertEqual(limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
"""
Test offset key works with a medium sized number.
"""
req = Request.blank('/?offset=10')
self.assertEqual(limited(self.tiny, req), [])
self.assertEqual(limited(self.small, req), self.small[10:])
self.assertEqual(limited(self.medium, req), self.medium[10:])
self.assertEqual(limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
"""
Test offset key works with a number over 1000 (max_limit).
"""
req = Request.blank('/?offset=1001')
self.assertEqual(limited(self.tiny, req), [])
self.assertEqual(limited(self.small, req), [])
self.assertEqual(limited(self.medium, req), [])
self.assertEqual(limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
"""
Test offset key works with a blank offset.
"""
req = Request.blank('/?offset=')
self.assertEqual(limited(self.tiny, req), self.tiny)
self.assertEqual(limited(self.small, req), self.small)
self.assertEqual(limited(self.medium, req), self.medium)
self.assertEqual(limited(self.large, req), self.large[:1000])
def test_limiter_offset_bad(self):
"""
Test offset key works with a BAD offset.
"""
req = Request.blank(u'/?offset=\u0020aa')
self.assertEqual(limited(self.tiny, req), self.tiny)
self.assertEqual(limited(self.small, req), self.small)
self.assertEqual(limited(self.medium, req), self.medium)
self.assertEqual(limited(self.large, req), self.large[:1000])
def test_limiter_nothing(self):
"""
Test request with no offset or limit
"""
req = Request.blank('/')
self.assertEqual(limited(self.tiny, req), self.tiny)
self.assertEqual(limited(self.small, req), self.small)
self.assertEqual(limited(self.medium, req), self.medium)
self.assertEqual(limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
"""
Test limit of zero.
"""
req = Request.blank('/?limit=0')
self.assertEqual(limited(self.tiny, req), self.tiny)
self.assertEqual(limited(self.small, req), self.small)
self.assertEqual(limited(self.medium, req), self.medium)
self.assertEqual(limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
"""
Test limit of 10.
"""
req = Request.blank('/?limit=10')
self.assertEqual(limited(self.tiny, req), self.tiny)
self.assertEqual(limited(self.small, req), self.small)
self.assertEqual(limited(self.medium, req), self.medium[:10])
self.assertEqual(limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
"""
Test limit of 3000.
"""
req = Request.blank('/?limit=3000')
self.assertEqual(limited(self.tiny, req), self.tiny)
self.assertEqual(limited(self.small, req), self.small)
self.assertEqual(limited(self.medium, req), self.medium)
self.assertEqual(limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
"""
Test request with both limit and offset.
"""
items = range(2000)
req = Request.blank('/?offset=1&limit=3')
self.assertEqual(limited(items, req), items[1:4])
req = Request.blank('/?offset=3&limit=0')
self.assertEqual(limited(items, req), items[3:1003])
req = Request.blank('/?offset=3&limit=1500')
self.assertEqual(limited(items, req), items[3:1003])
req = Request.blank('/?offset=3000&limit=10')
self.assertEqual(limited(items, req), [])
def test_limiter_custom_max_limit(self):
"""
Test a max_limit other than 1000.
"""
items = range(2000)
req = Request.blank('/?offset=1&limit=3')
self.assertEqual(limited(items, req, max_limit=2000), items[1:4])
req = Request.blank('/?offset=3&limit=0')
self.assertEqual(limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3&limit=2500')
self.assertEqual(limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3000&limit=10')
self.assertEqual(limited(items, req, max_limit=2000), [])
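# Minimal sketch of a limited() implementation consistent with the tests
# above; the real nova.api.openstack.common.limited may differ in detail.
def limited_sketch(items, request, max_limit=1000):
    try:
        offset = int(request.GET.get('offset', 0))
    except ValueError:
        offset = 0
    try:
        limit = int(request.GET.get('limit', max_limit))
    except ValueError:
        limit = max_limit
    limit = min(max_limit, limit or max_limit)   # limit=0 means "no limit"
    return items[offset:offset + limit]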
@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import datetime
import json import json
import unittest import unittest
@ -39,6 +40,13 @@ def return_server(context, id):
return stub_instance(id) return stub_instance(id)
def return_server_with_addresses(private, public):
def _return_server(context, id):
return stub_instance(id, private_address=private,
public_addresses=public)
return _return_server
def return_servers(context, user_id=1): def return_servers(context, user_id=1):
return [stub_instance(i, user_id) for i in xrange(5)] return [stub_instance(i, user_id) for i in xrange(5)]
@ -55,9 +63,45 @@ def instance_address(context, instance_id):
return None return None
def stub_instance(id, user_id=1): def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
return Instance(id=id, state=0, image_id=10, user_id=user_id, if public_addresses == None:
display_name='server%s' % id) public_addresses = list()
instance = {
"id": id,
"admin_pass": "",
"user_id": user_id,
"project_id": "",
"image_id": 10,
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
"key_name": "",
"key_data": "",
"state": 0,
"state_description": "",
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
"hostname": "",
"host": "",
"instance_type": "",
"user_data": "",
"reservation_id": "",
"mac_address": "",
"scheduled_at": datetime.datetime.now(),
"launched_at": datetime.datetime.now(),
"terminated_at": datetime.datetime.now(),
"availability_zone": "",
"display_name": "server%s" % id,
"display_description": "",
"locked": False}
instance["fixed_ip"] = {
"address": private_address,
"floating_ips": [{"address":ip} for ip in public_addresses]}
return instance
def fake_compute_api(cls, req, id): def fake_compute_api(cls, req, id):
@ -105,6 +149,22 @@ class ServersTest(unittest.TestCase):
self.assertEqual(res_dict['server']['id'], '1') self.assertEqual(res_dict['server']['id'], '1')
self.assertEqual(res_dict['server']['name'], 'server1') self.assertEqual(res_dict['server']['name'], 'server1')
def test_get_server_by_id_with_addresses(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.0/servers/1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res_dict['server']['id'], '1')
self.assertEqual(res_dict['server']['name'], 'server1')
addresses = res_dict['server']['addresses']
self.assertEqual(len(addresses["public"]), len(public))
self.assertEqual(addresses["public"][0], public[0])
self.assertEqual(len(addresses["private"]), 1)
self.assertEqual(addresses["private"][0], private)
def test_get_server_list(self): def test_get_server_list(self):
req = webob.Request.blank('/v1.0/servers') req = webob.Request.blank('/v1.0/servers')
res = req.get_response(fakes.wsgi_app()) res = req.get_response(fakes.wsgi_app())
@ -281,6 +341,18 @@ class ServersTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app()) res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202) self.assertEqual(res.status_int, 202)
def test_server_reset_network(self):
FLAGS.allow_admin_api = True
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
req = webob.Request.blank('/v1.0/servers/1/reset_network')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def test_server_diagnostics(self): def test_server_diagnostics(self):
req = webob.Request.blank("/v1.0/servers/1/diagnostics") req = webob.Request.blank("/v1.0/servers/1/diagnostics")
req.method = "GET" req.method = "GET"
@ -0,0 +1,140 @@
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import stubout
import webob
import json
import nova.db
from nova import context
from nova import flags
from nova.api.openstack import zones
from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
FLAGS.verbose = True
def zone_get(context, zone_id):
return dict(id=1, api_url='http://foo.com', username='bob',
password='xxx')
def zone_create(context, values):
zone = dict(id=1)
zone.update(values)
return zone
def zone_update(context, zone_id, values):
zone = dict(id=zone_id, api_url='http://foo.com', username='bob',
password='xxx')
zone.update(values)
return zone
def zone_delete(context, zone_id):
pass
def zone_get_all(context):
return [
dict(id=1, api_url='http://foo.com', username='bob',
password='xxx'),
dict(id=2, api_url='http://blah.com', username='alice',
password='qwerty')]
class ZonesTest(unittest.TestCase):
def setUp(self):
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
self.allow_admin = FLAGS.allow_admin_api
FLAGS.allow_admin_api = True
self.stubs.Set(nova.db, 'zone_get', zone_get)
self.stubs.Set(nova.db, 'zone_get_all', zone_get_all)
self.stubs.Set(nova.db, 'zone_update', zone_update)
self.stubs.Set(nova.db, 'zone_create', zone_create)
self.stubs.Set(nova.db, 'zone_delete', zone_delete)
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
def test_get_zone_list(self):
req = webob.Request.blank('/v1.0/zones')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res_dict['zones']), 2)
def test_get_zone_by_id(self):
req = webob.Request.blank('/v1.0/zones/1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
self.assertFalse('password' in res_dict['zone'])
self.assertEqual(res.status_int, 200)
def test_zone_delete(self):
req = webob.Request.blank('/v1.0/zones/1')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
def test_zone_create(self):
body = dict(zone=dict(api_url='http://blah.zoo', username='fred',
password='fubar'))
req = webob.Request.blank('/v1.0/zones')
req.method = 'POST'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo')
self.assertFalse('username' in res_dict['zone'])
def test_zone_update(self):
body = dict(zone=dict(username='zeb', password='sneaky'))
req = webob.Request.blank('/v1.0/zones/1')
req.method = 'PUT'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
self.assertFalse('username' in res_dict['zone'])
if __name__ == '__main__':
unittest.main()
@ -202,6 +202,14 @@ class ComputeTestCase(test.TestCase):
self.compute.set_admin_password(self.context, instance_id) self.compute.set_admin_password(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id) self.compute.terminate_instance(self.context, instance_id)
def test_inject_file(self):
"""Ensure we can write a file to an instance"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.inject_file(self.context, instance_id, "/tmp/test",
"File Contents")
self.compute.terminate_instance(self.context, instance_id)
def test_snapshot(self): def test_snapshot(self):
"""Ensure instance can be snapshotted""" """Ensure instance can be snapshotted"""
instance_id = self._create_instance() instance_id = self._create_instance()
@ -46,6 +46,27 @@ class RootLoggerTestCase(test.TestCase):
self.assert_(True) # didn't raise exception self.assert_(True) # didn't raise exception
class LogHandlerTestCase(test.TestCase):
def test_log_path_logdir(self):
self.flags(logdir='/some/path')
self.assertEquals(log.get_log_file_path(binary='foo-bar'),
'/some/path/foo-bar.log')
def test_log_path_logfile(self):
self.flags(logfile='/some/path/foo-bar.log')
self.assertEquals(log.get_log_file_path(binary='foo-bar'),
'/some/path/foo-bar.log')
def test_log_path_none(self):
self.assertTrue(log.get_log_file_path(binary='foo-bar') is None)
def test_log_path_logfile_overrides_logdir(self):
self.flags(logdir='/some/other/path',
logfile='/some/path/foo-bar.log')
self.assertEquals(log.get_log_file_path(binary='foo-bar'),
'/some/path/foo-bar.log')
class NovaFormatterTestCase(test.TestCase): class NovaFormatterTestCase(test.TestCase):
def setUp(self): def setUp(self):
super(NovaFormatterTestCase, self).setUp() super(NovaFormatterTestCase, self).setUp()
@ -32,6 +32,7 @@ from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils from nova.virt.xenapi import volume_utils
from nova.virt.xenapi.vmops import SimpleDH from nova.virt.xenapi.vmops import SimpleDH
from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs from nova.tests.glance import stubs as glance_stubs
@ -141,6 +142,10 @@ class XenAPIVolumeTestCase(test.TestCase):
self.stubs.UnsetAll() self.stubs.UnsetAll()
def reset_network(*args):
pass
class XenAPIVMTestCase(test.TestCase): class XenAPIVMTestCase(test.TestCase):
""" """
Unit tests for VM operations Unit tests for VM operations
@ -162,6 +167,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs) stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs) stubs.stubout_stream_disk(self.stubs)
self.stubs.Set(VMOps, 'reset_network', reset_network)
glance_stubs.stubout_glance_client(self.stubs, glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance) glance_stubs.FakeGlance)
self.conn = xenapi_conn.get_connection(False) self.conn = xenapi_conn.get_connection(False)
@ -243,7 +249,8 @@ class XenAPIVMTestCase(test.TestCase):
# Check that the VM is running according to XenAPI. # Check that the VM is running according to XenAPI.
self.assertEquals(vm['power_state'], 'Running') self.assertEquals(vm['power_state'], 'Running')
def _test_spawn(self, image_id, kernel_id, ramdisk_id): def _test_spawn(self, image_id, kernel_id, ramdisk_id,
instance_type="m1.large"):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
values = {'name': 1, values = {'name': 1,
'id': 1, 'id': 1,
@ -252,7 +259,7 @@ class XenAPIVMTestCase(test.TestCase):
'image_id': image_id, 'image_id': image_id,
'kernel_id': kernel_id, 'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id, 'ramdisk_id': ramdisk_id,
'instance_type': 'm1.large', 'instance_type': instance_type,
'mac_address': 'aa:bb:cc:dd:ee:ff', 'mac_address': 'aa:bb:cc:dd:ee:ff',
} }
conn = xenapi_conn.get_connection(False) conn = xenapi_conn.get_connection(False)
@ -260,6 +267,12 @@ class XenAPIVMTestCase(test.TestCase):
conn.spawn(instance) conn.spawn(instance)
self.check_vm_record(conn) self.check_vm_record(conn)
def test_spawn_not_enough_memory(self):
FLAGS.xenapi_image_service = 'glance'
self.assertRaises(Exception,
self._test_spawn,
1, 2, 3, "m1.xlarge")
def test_spawn_raw_objectstore(self): def test_spawn_raw_objectstore(self):
FLAGS.xenapi_image_service = 'objectstore' FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, None, None) self._test_spawn(1, None, None)
@ -43,8 +43,6 @@ else:
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_string('logdir', None, 'directory to keep log files in '
'(will be prepended to $logfile)')
class TwistdServerOptions(ServerOptions): class TwistdServerOptions(ServerOptions):
@ -20,13 +20,14 @@
System-level utilities and helper functions. System-level utilities and helper functions.
""" """
import base64
import datetime import datetime
import inspect import inspect
import json import json
import os import os
import random import random
import subprocess
import socket import socket
import string
import struct import struct
import sys import sys
import time import time
@ -36,6 +37,7 @@ import netaddr
from eventlet import event from eventlet import event
from eventlet import greenthread from eventlet import greenthread
from eventlet.green import subprocess
from nova import exception from nova import exception
from nova.exception import ProcessExecutionError from nova.exception import ProcessExecutionError
@ -235,6 +237,15 @@ def generate_mac():
return ':'.join(map(lambda x: "%02x" % x, mac)) return ':'.join(map(lambda x: "%02x" % x, mac))
def generate_password(length=20):
"""Generate a random sequence of letters and digits
to be used as a password. Note that this is not intended
to represent the ultimate in security.
"""
chrs = string.letters + string.digits
return "".join([random.choice(chrs) for i in xrange(length)])
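# Illustrative usage of the helper above (letters and digits only, so there
# are 62**length possible values).
from nova import utils

pw = utils.generate_password()        # 20 characters by default
short_pw = utils.generate_password(8)
assert len(pw) == 20 and len(short_pw) == 8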
def last_octet(address): def last_octet(address):
return int(address.split(".")[-1]) return int(address.split(".")[-1])
@ -476,3 +487,15 @@ def dumps(value):
def loads(s): def loads(s):
return json.loads(s) return json.loads(s)
def ensure_b64_encoding(val):
"""Safety method to ensure that values expected to be base64-encoded
actually are. If they are, the value is returned unchanged. Otherwise,
the encoded value is returned.
"""
try:
dummy = base64.b64decode(val)
return val
except TypeError:
return base64.b64encode(val)
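# Illustrative usage of ensure_b64_encoding(). The check is a heuristic:
# any value that happens to decode cleanly is passed through unchanged.
import base64
from nova import utils

encoded = base64.b64encode('/tmp/test')
assert utils.ensure_b64_encoding(encoded) == encoded
assert utils.ensure_b64_encoding('abc') == base64.b64encode('abc')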
@ -152,6 +152,21 @@ class FakeConnection(object):
""" """
pass pass
def inject_file(self, instance, b64_path, b64_contents):
"""
Writes a file on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the base64-encoded path to which the file is to be
written on the instance; the third is the contents of the file, also
base64-encoded.
The work will be done asynchronously. This function returns a
task that allows the caller to detect when it is complete.
"""
pass
def rescue(self, instance): def rescue(self, instance):
""" """
Rescue the specified instance. Rescue the specified instance.
@ -286,6 +286,10 @@ class SessionBase(object):
rec['currently_attached'] = False rec['currently_attached'] = False
rec['device'] = '' rec['device'] = ''
def host_compute_free_memory(self, _1, ref):
#Always return 12GB available
return 12 * 1024 * 1024 * 1024
def xenapi_request(self, methodname, params): def xenapi_request(self, methodname, params):
if methodname.startswith('login'): if methodname.startswith('login'):
self._login(methodname, params) self._login(methodname, params)
@ -138,6 +138,16 @@ class VMHelper(HelperBase):
LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals()) LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
return vm_ref return vm_ref
@classmethod
def ensure_free_mem(cls, session, instance):
instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
mem = long(instance_type['memory_mb']) * 1024 * 1024
#get free memory from host
host = session.get_xenapi_host()
host_free_mem = long(session.get_xenapi().host.
compute_free_memory(host))
return host_free_mem >= mem
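# Worked example of the check above (memory_mb figures assumed for
# illustration; the fake session used in the tests reports 12GB free):
#     m1.large  ->  8192 MB ->  8 * 1024**3 bytes <= 12 * 1024**3 -> spawn ok
#     m1.xlarge -> 16384 MB -> 16 * 1024**3 bytes >  12 * 1024**3 -> refused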
@classmethod @classmethod
def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new """Create a VBD record. Returns a Deferred that gives the new
@ -384,7 +394,7 @@ class VMHelper(HelperBase):
pv = True pv = True
elif pv_str.lower() == 'false': elif pv_str.lower() == 'false':
pv = False pv = False
LOG.debug(_("PV Kernel in VDI:%d"), pv) LOG.debug(_("PV Kernel in VDI:%s"), pv)
return pv return pv
@classmethod @classmethod
@ -67,13 +67,19 @@ class VMOps(object):
raise exception.Duplicate(_('Attempted to create' raise exception.Duplicate(_('Attempted to create'
' non-unique name %s') % instance.name) ' non-unique name %s') % instance.name)
bridge = db.network_get_by_instance(context.get_admin_context(), #ensure enough free memory is available
instance['id'])['bridge'] if not VMHelper.ensure_free_mem(self._session, instance):
network_ref = \ name = instance['name']
NetworkHelper.find_network_with_bridge(self._session, bridge) LOG.exception(_('instance %(name)s: not enough free memory')
% locals())
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
return
user = AuthManager().get_user(instance.user_id) user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id) project = AuthManager().get_project(instance.project_id)
#if kernel is not present we must download a raw disk #if kernel is not present we must download a raw disk
if instance.kernel_id: if instance.kernel_id:
disk_image_type = ImageType.DISK disk_image_type = ImageType.DISK
@ -99,16 +105,70 @@ class VMOps(object):
instance, kernel, ramdisk, pv_kernel) instance, kernel, ramdisk, pv_kernel)
VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
if network_ref: # write network info
VMHelper.create_vif(self._session, vm_ref, admin_context = context.get_admin_context()
network_ref, instance.mac_address)
# TODO(tr3buchet) - remove comment in multi-nic
# I've decided to go ahead and consider multiple IPs and networks
# at this stage even though they aren't implemented because these will
# be needed for multi-nic and there was no sense writing it for single
# network/single IP and then having to turn around and re-write it
IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
for network in db.network_get_all_by_instance(admin_context,
instance['id']):
network_IPs = [ip for ip in IPs if ip.network_id == network.id]
def ip_dict(ip):
return {'netmask': network['netmask'],
'enabled': '1',
'ip': ip.address}
mac_id = instance.mac_address.replace(':', '')
location = 'vm-data/networking/%s' % mac_id
mapping = {'label': network['label'],
'gateway': network['gateway'],
'mac': instance.mac_address,
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_IPs]}
self.write_to_param_xenstore(vm_ref, {location: mapping})
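# For reference, the record written above is keyed by the mac address with
# colons stripped, e.g. 'vm-data/networking/aabbccddeeff', and holds a
# mapping like (values invented for illustration):
#     {'label': 'public',
#      'gateway': '10.0.0.1',
#      'mac': 'aa:bb:cc:dd:ee:ff',
#      'dns': ['10.0.0.2'],
#      'ips': [{'netmask': '255.255.255.0', 'enabled': '1',
#               'ip': '10.0.0.3'}]}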
# TODO(tr3buchet) - remove comment in multi-nic
# this bit here about creating the vifs will be updated
# in multi-nic to handle multiple IPs on the same network
# and multiple networks
# for now it works as there is only one of each
bridge = network['bridge']
network_ref = \
NetworkHelper.find_network_with_bridge(self._session, bridge)
if network_ref:
VMHelper.create_vif(self._session, vm_ref,
network_ref, instance.mac_address)
LOG.debug(_('Starting VM %s...'), vm_ref) LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False) self._session.call_xenapi('VM.start', vm_ref, False, False)
instance_name = instance.name instance_name = instance.name
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.') LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals()) % locals())
def _inject_onset_files():
onset_files = instance.onset_files
if onset_files:
# Check if this is a JSON-encoded string and convert if needed.
if isinstance(onset_files, basestring):
try:
onset_files = json.loads(onset_files)
except ValueError:
LOG.exception(_("Invalid value for onset_files: '%s'")
% onset_files)
onset_files = []
# Inject any files, if specified
for path, contents in onset_files:
LOG.debug(_("Injecting file path: '%s'") % path)
self.inject_file(instance, path, contents)
# NOTE(armando): Do we really need to do this in virt? # NOTE(armando): Do we really need to do this in virt?
# NOTE(tr3buchet): not sure but wherever we do it, we need to call
# reset_network afterwards
timer = utils.LoopingCall(f=None) timer = utils.LoopingCall(f=None)
def _wait_for_boot(): def _wait_for_boot():
@ -119,6 +179,8 @@ class VMOps(object):
if state == power_state.RUNNING: if state == power_state.RUNNING:
LOG.debug(_('Instance %s: booted'), instance['name']) LOG.debug(_('Instance %s: booted'), instance['name'])
timer.stop() timer.stop()
_inject_onset_files()
return True
except Exception, exc: except Exception, exc:
LOG.warn(exc) LOG.warn(exc)
LOG.exception(_('instance %s: failed to boot'), LOG.exception(_('instance %s: failed to boot'),
@ -127,8 +189,13 @@ class VMOps(object):
instance['id'], instance['id'],
power_state.SHUTDOWN) power_state.SHUTDOWN)
timer.stop() timer.stop()
return False
timer.f = _wait_for_boot timer.f = _wait_for_boot
# call reset networking
self.reset_network(instance)
return timer.start(interval=0.5, now=True) return timer.start(interval=0.5, now=True)
def _get_vm_opaque_ref(self, instance_or_vm): def _get_vm_opaque_ref(self, instance_or_vm):
@ -161,7 +228,8 @@ class VMOps(object):
instance_name = instance_or_vm.name instance_name = instance_or_vm.name
vm = VMHelper.lookup(self._session, instance_name) vm = VMHelper.lookup(self._session, instance_name)
if vm is None: if vm is None:
raise Exception(_('Instance not present %s') % instance_name) raise exception.NotFound(
_('Instance not present %s') % instance_name)
return vm return vm
def snapshot(self, instance, image_id): def snapshot(self, instance, image_id):
@ -255,6 +323,32 @@ class VMOps(object):
raise RuntimeError(resp_dict['message']) raise RuntimeError(resp_dict['message'])
return resp_dict['message'] return resp_dict['message']
def inject_file(self, instance, b64_path, b64_contents):
"""Write a file to the VM instance. The path to which it is to be
written and the contents of the file need to be supplied; both should
be base64-encoded to prevent errors with non-ASCII characters being
transmitted. If the agent does not support file injection, or the user
has disabled it, a NotImplementedError will be raised.
"""
# Files/paths *should* be base64-encoded at this point, but
# double-check to make sure.
b64_path = utils.ensure_b64_encoding(b64_path)
b64_contents = utils.ensure_b64_encoding(b64_contents)
# Need to uniquely identify this request.
transaction_id = str(uuid.uuid4())
args = {'id': transaction_id, 'b64_path': b64_path,
'b64_contents': b64_contents}
# If the agent doesn't support file injection, a NotImplementedError
# will be raised with the appropriate message.
resp = self._make_agent_call('inject_file', instance, '', args)
resp_dict = json.loads(resp)
if resp_dict['returncode'] != '0':
# There was some other sort of error; the message will contain
# a description of the error.
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
def _shutdown(self, instance, vm): def _shutdown(self, instance, vm):
"""Shutdown an instance """ """Shutdown an instance """
state = self.get_info(instance['name'])['state'] state = self.get_info(instance['name'])['state']
@ -389,6 +483,14 @@ class VMOps(object):
# TODO: implement this! # TODO: implement this!
return 'http://fakeajaxconsole/fake_url' return 'http://fakeajaxconsole/fake_url'
def reset_network(self, instance):
"""
Creates uuid arg to pass to make_agent_call and calls it.
"""
args = {'id': str(uuid.uuid4())}
resp = self._make_agent_call('resetnetwork', instance, '', args)
def list_from_xenstore(self, vm, path): def list_from_xenstore(self, vm, path):
"""Runs the xenstore-ls command to get a listing of all records """Runs the xenstore-ls command to get a listing of all records
from 'path' downward. Returns a dict with the sub-paths as keys, from 'path' downward. Returns a dict with the sub-paths as keys,
@ -458,6 +560,11 @@ class VMOps(object):
if 'TIMEOUT:' in err_msg: if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. ' LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'VM id=%(instance_id)s; args=%(strargs)s') % locals()) 'VM id=%(instance_id)s; args=%(strargs)s') % locals())
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
' supported by the agent. VM id=%(instance_id)s;'
' args=%(strargs)s') % locals())
raise NotImplementedError(err_msg)
else: else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. ' LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'VM id=%(instance_id)s; args=%(strargs)s') % locals()) 'VM id=%(instance_id)s; args=%(strargs)s') % locals())
@ -168,6 +168,12 @@ class XenAPIConnection(object):
"""Set the root/admin password on the VM instance""" """Set the root/admin password on the VM instance"""
self._vmops.set_admin_password(instance, new_pass) self._vmops.set_admin_password(instance, new_pass)
def inject_file(self, instance, b64_path, b64_contents):
"""Create a file on the VM instance. The file path and contents
should be base64-encoded.
"""
self._vmops.inject_file(instance, b64_path, b64_contents)
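# Caller-side sketch (illustrative only): both arguments are expected to be
# base64-encoded already, e.g.
#     conn.inject_file(instance,
#                      base64.b64encode('/tmp/test'),
#                      base64.b64encode('File Contents'))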
def destroy(self, instance): def destroy(self, instance):
"""Destroy VM instance""" """Destroy VM instance"""
self._vmops.destroy(instance) self._vmops.destroy(instance)
@ -188,6 +194,10 @@ class XenAPIConnection(object):
"""resume the specified instance""" """resume the specified instance"""
self._vmops.resume(instance, callback) self._vmops.resume(instance, callback)
def reset_network(self, instance):
"""reset networking for specified instance"""
self._vmops.reset_network(instance)
def get_info(self, instance_id): def get_info(self, instance_id):
"""Return data about VM instance""" """Return data about VM instance"""
return self._vmops.get_info(instance_id) return self._vmops.get_info(instance_id)
@ -49,7 +49,7 @@ class API(base.Base):
options = { options = {
'size': size, 'size': size,
'user_id': context.user.id, 'user_id': context.user_id,
'project_id': context.project_id, 'project_id': context.project_id,
'availability_zone': FLAGS.storage_availability_zone, 'availability_zone': FLAGS.storage_availability_zone,
'status': "creating", 'status': "creating",
@ -85,7 +85,7 @@ class API(base.Base):
return self.db.volume_get(context, volume_id) return self.db.volume_get(context, volume_id)
def get_all(self, context): def get_all(self, context):
if context.user.is_admin(): if context.is_admin:
return self.db.volume_get_all(context) return self.db.volume_get_all(context)
return self.db.volume_get_all_by_project(context, context.project_id) return self.db.volume_get_all_by_project(context, context.project_id)
@ -111,10 +111,10 @@ class VolumeManager(manager.Manager):
LOG.debug(_("volume %s: creating export"), volume_ref['name']) LOG.debug(_("volume %s: creating export"), volume_ref['name'])
self.driver.create_export(context, volume_ref) self.driver.create_export(context, volume_ref)
except Exception as e: except Exception:
self.db.volume_update(context, self.db.volume_update(context,
volume_ref['id'], {'status': 'error'}) volume_ref['id'], {'status': 'error'})
raise e raise
now = datetime.datetime.utcnow() now = datetime.datetime.utcnow()
self.db.volume_update(context, self.db.volume_update(context,
@ -137,11 +137,11 @@ class VolumeManager(manager.Manager):
self.driver.remove_export(context, volume_ref) self.driver.remove_export(context, volume_ref)
LOG.debug(_("volume %s: deleting"), volume_ref['name']) LOG.debug(_("volume %s: deleting"), volume_ref['name'])
self.driver.delete_volume(volume_ref) self.driver.delete_volume(volume_ref)
except Exception as e: except Exception:
self.db.volume_update(context, self.db.volume_update(context,
volume_ref['id'], volume_ref['id'],
{'status': 'error_deleting'}) {'status': 'error_deleting'})
raise e raise
self.db.volume_destroy(context, volume_id) self.db.volume_destroy(context, volume_id)
LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
@ -91,6 +91,17 @@ def password(self, arg_dict):
return resp return resp
@jsonify
def resetnetwork(self, arg_dict):
"""Writes a request to xenstore that tells the agent
to reset networking.
"""
arg_dict['value'] = json.dumps({'name': 'resetnetwork', 'value': ''})
request_id = arg_dict['id']
arg_dict['path'] = "data/host/%s" % request_id
xenstore.write_record(self, arg_dict)
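# Illustrative result: for a request id of 'abc123' this writes the
# xenstore key 'data/host/abc123' with the value
#     '{"name": "resetnetwork", "value": ""}'
# for the in-guest agent to pick up.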
def _wait_for_agent(self, request_id, arg_dict): def _wait_for_agent(self, request_id, arg_dict):
"""Periodically checks xenstore for a response from the agent. """Periodically checks xenstore for a response from the agent.
The request is always written to 'data/host/{id}', and The request is always written to 'data/host/{id}', and
@ -124,4 +135,5 @@ def _wait_for_agent(self, request_id, arg_dict):
if __name__ == "__main__": if __name__ == "__main__":
XenAPIPlugin.dispatch( XenAPIPlugin.dispatch(
{"key_init": key_init, {"key_init": key_init,
"password": password}) "password": password,
"resetnetwork": resetnetwork})
@ -36,7 +36,15 @@ pluginlib.configure_logging("xenstore")
def jsonify(fnc): def jsonify(fnc):
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
return json.dumps(fnc(*args, **kwargs)) ret = fnc(*args, **kwargs)
try:
json.loads(ret)
except ValueError:
# Value should already be JSON-encoded, but some operations
# may write raw string values; this will catch those and
# properly encode them.
ret = json.dumps(ret)
return ret
return wrapper return wrapper
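# Illustrative usage of the wrapper above: a plugin method that returns an
# already-JSON string is passed through, a raw string is encoded exactly
# once.
@jsonify
def already_encoded(arg_dict):
    return json.dumps({'returncode': '0', 'message': 'ok'})


@jsonify
def raw_string(arg_dict):
    return 'ok'

assert json.loads(already_encoded({})) == {'returncode': '0', 'message': 'ok'}
assert json.loads(raw_string({})) == 'ok'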
File diff suppressed because it is too large
@ -94,9 +94,13 @@ DistUtilsExtra.auto.setup(name='nova',
packages=find_packages(exclude=['bin', 'smoketests']), packages=find_packages(exclude=['bin', 'smoketests']),
include_package_data=True, include_package_data=True,
test_suite='nose.collector', test_suite='nose.collector',
scripts=['bin/nova-api', scripts=['bin/nova-ajax-console-proxy',
'bin/nova-api',
'bin/nova-combined',
'bin/nova-compute', 'bin/nova-compute',
'bin/nova-console',
'bin/nova-dhcpbridge', 'bin/nova-dhcpbridge',
'bin/nova-direct-api',
'bin/nova-import-canonical-imagestore', 'bin/nova-import-canonical-imagestore',
'bin/nova-instancemonitor', 'bin/nova-instancemonitor',
'bin/nova-logspool', 'bin/nova-logspool',
@ -105,5 +109,6 @@ DistUtilsExtra.auto.setup(name='nova',
'bin/nova-objectstore', 'bin/nova-objectstore',
'bin/nova-scheduler', 'bin/nova-scheduler',
'bin/nova-spoolsentry', 'bin/nova-spoolsentry',
'bin/stack',
'bin/nova-volume', 'bin/nova-volume',
'tools/nova-debug']) 'tools/nova-debug'])