Merged and fixed conflicts with r515
This commit is contained in: commit 4267c1f204

.gitignore
@@ -6,6 +6,7 @@ keys
networks
nova.sqlite
CA/cacert.pem
CA/crl.pem
CA/index.txt*
CA/openssl.cnf
CA/serial*

11 .mailmap
@@ -19,11 +19,14 @@
<mordred@inaugust.com> <mordred@hudson>
<paul@openstack.org> <pvoccio@castor.local>
<paul@openstack.org> <paul.voccio@rackspace.com>
<soren.hansen@rackspace.com> <soren@linux2go.dk>
<todd@ansolabs.com> <todd@lapex>
<todd@ansolabs.com> <todd@rubidine.com>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
# These are from people who failed to set a proper committer
. <root@tonbuntu>
. <laner@controller>
. <root@ubuntu>
<vishvananda@gmail.com> <root@ubuntu>
<sleepsonthefloor@gmail.com> <root@tonbuntu>
<rlane@wikimedia.org> <laner@controller>
<rconradharris@gmail.com> <rick.harris@rackspace.com>
<corywright@gmail.com> <cory.wright@rackspace.com>
<ant@openstack.org> <amesserl@rackspace.com>

11 Authors
@@ -1,11 +1,16 @@
Andy Smith <code@term.ie>
Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Chris Behrens <cbehrens@codestud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Cory Wright <corywright@gmail.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <enugaev@griddynamics.com>
Eric Day <eday@oddments.org>
Ewan Mellor <ewan.mellor@citrix.com>
Hisaki Ohara <hisaki.ohara@intel.com>
@@ -13,6 +18,7 @@ Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
Joe Heck <heckj@mac.com>
Joel Moore <joelbm24@gmail.com>
Jonathan Bryce <jbryce@jbryce.com>
Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
@@ -21,11 +27,16 @@ Michael Gundlach <michael.gundlach@rackspace.com>
Monty Taylor <mordred@inaugust.com>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>
Ryan Lane <rlane@wikimedia.org>
Ryan Lucio <rlucio@internap.com>
Salvatore Orlando <salvatore.orlando@eu.citrix.com>
Sandy Walsh <sandy.walsh@rackspace.com>
Soren Hansen <soren.hansen@rackspace.com>
Thierry Carrez <thierry@openstack.org>
Todd Willey <todd@ansolabs.com>
Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>

CA/geninter.sh
@@ -16,16 +16,24 @@
# License for the specific language governing permissions and limitations
# under the License.

# ARG is the id of the user
export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-intCA-$1"
mkdir INTER/$1
cd INTER/$1
# $1 is the id of the project and $2 is the subject of the cert
NAME=$1
SUBJ=$2
mkdir -p projects/$NAME
cd projects/$NAME
cp ../../openssl.cnf.tmpl openssl.cnf
sed -i -e s/%USERNAME%/$1/g openssl.cnf
sed -i -e s/%USERNAME%/$NAME/g openssl.cnf
mkdir certs crl newcerts private
openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
echo "10" > serial
touch index.txt
openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes
openssl req -new -sha2 -key private/cakey.pem -out ../../reqs/inter$1.csr -batch -subj "$SUBJ"
cd ../../
openssl ca -extensions v3_ca -days 365 -out INTER/$1/cacert.pem -in reqs/inter$1.csr -config openssl.cnf -batch
# NOTE(vish): Disabling intermediate ca's because we don't actually need them.
# It makes more sense to have each project have its own root ca.
# openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes
# openssl req -new -sha256 -key private/cakey.pem -out ../../reqs/inter$NAME.csr -batch -subj "$SUBJ"
openssl ca -gencrl -config ./openssl.cnf -out crl.pem
if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then
    sudo chown -R nova:nogroup .
fi
# cd ../../
# openssl ca -extensions v3_ca -days 365 -out INTER/$NAME/cacert.pem -in reqs/inter$NAME.csr -config openssl.cnf -batch

CA/genrootca.sh
@@ -25,4 +25,5 @@ else
    openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
    touch index.txt
    echo "10" > serial
    openssl ca -gencrl -config ./openssl.cnf -out crl.pem
fi
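
Note on the change above: dropping the intermediate CAs means each per-project root CA must now publish its own revocation state, which is why `openssl ca -gencrl` lands in both scripts. A minimal Python sketch of the matching revocation step — the helper name and paths are illustrative, not part of this commit::

    import subprocess


    def revoke_cert(ca_dir, cert_file):
        # Mark the certificate as revoked in this CA's index.txt.
        subprocess.check_call(['openssl', 'ca', '-revoke', cert_file,
                               '-config', './openssl.cnf'], cwd=ca_dir)
        # Regenerate crl.pem so clients can see the revocation.
        subprocess.check_call(['openssl', 'ca', '-gencrl',
                               '-config', './openssl.cnf',
                               '-out', 'crl.pem'], cwd=ca_dir)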

34 nova/tests/validator_unittest.py → CA/genvpn.sh (Normal file → Executable file)
@@ -1,3 +1,4 @@
#!/bin/bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
@@ -16,27 +17,20 @@
# License for the specific language governing permissions and limitations
# under the License.

import logging
import unittest
# This gets zipped and run on the cloudpipe-managed OpenVPN server
NAME=$1
SUBJ=$2

from nova import flags
from nova import test
from nova import validate
mkdir -p projects/$NAME
cd projects/$NAME

# generate a server priv key
openssl genrsa -out server.key 2048

class ValidationTestCase(test.TrialTestCase):
    def setUp(self):
        super(ValidationTestCase, self).setUp()
# generate a server CSR
openssl req -new -key server.key -out server.csr -batch -subj "$SUBJ"

    def tearDown(self):
        super(ValidationTestCase, self).tearDown()

    def test_type_validation(self):
        self.assertTrue(type_case("foo", 5, 1))
        self.assertRaises(TypeError, type_case, "bar", "5", 1)
        self.assertRaises(TypeError, type_case, None, 5, 1)


@validate.typetest(instanceid=str, size=int, number_of_instances=int)
def type_case(instanceid, size, number_of_instances):
    return True
novauid=`getent passwd nova | awk -F: '{print $3}'`
if [ ! -z "${novauid}" ] && [ "`id -u`" != "${novauid}" ]; then
    sudo chown -R nova:nogroup .
fi

CA/openssl.cnf.tmpl
@@ -24,7 +24,6 @@ dir = .

[ ca ]
default_ca = CA_default
unique_subject = no

[ CA_default ]
serial = $dir/serial
@@ -32,6 +31,8 @@ database = $dir/index.txt
new_certs_dir = $dir/newcerts
certificate = $dir/cacert.pem
private_key = $dir/private/cakey.pem
unique_subject = no
default_crl_days = 365
default_days = 365
default_md = md5
preserve = no

MANIFEST.in
@@ -13,7 +13,7 @@ include nova/cloudpipe/client.ovpn.template
include nova/compute/fakevirtinstance.xml
include nova/compute/interfaces.template
include nova/virt/interfaces.template
include nova/virt/libvirt.*.xml.template
include nova/virt/libvirt*.xml.template
include nova/tests/CA/
include nova/tests/CA/cacert.pem
include nova/tests/CA/private/

20 bin/nova-api
@@ -17,9 +17,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Nova API daemon.
"""

"""Starter script for Nova API."""

import gettext
import os
@@ -35,9 +34,11 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):

gettext.install('nova', unicode=1)

from nova import api
from nova import flags
from nova import utils
from nova import server
from nova import wsgi


FLAGS = flags.FLAGS
flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
@@ -46,15 +47,10 @@ flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port')
flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host')


def main(_args):
    from nova import api
    from nova import wsgi
if __name__ == '__main__':
    utils.default_flagfile()
    FLAGS(sys.argv)
    server = wsgi.Server()
    server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host)
    server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host)
    server.wait()


if __name__ == '__main__':
    utils.default_flagfile()
    server.serve('nova-api', main)

109 bin/nova-api-paste (Executable file)
@@ -0,0 +1,109 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Starter script for Nova API."""

import gettext
import logging
import os
import sys

from paste import deploy

# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                   os.pardir,
                                   os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import flags
from nova import wsgi

LOG = logging.getLogger('nova.api')
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())

FLAGS = flags.FLAGS

API_ENDPOINTS = ['ec2', 'openstack']


def load_configuration(paste_config):
    """Load the paste configuration from the config file and return it."""
    config = None
    # Try each known name to get the global DEFAULTS, which will give ports
    for name in API_ENDPOINTS:
        try:
            config = deploy.appconfig("config:%s" % paste_config, name=name)
        except LookupError:
            pass
    if config:
        verbose = config.get('verbose', None)
        if verbose:
            FLAGS.verbose = int(verbose) == 1
        if FLAGS.verbose:
            logging.getLogger().setLevel(logging.DEBUG)
        return config
    LOG.debug(_("Paste config at %s has no section for known apis"),
              paste_config)
    print _("Paste config at %s has no section for any known apis") % \
        paste_config
    sys.exit(1)


def launch_api(paste_config_file, section, server, port, host):
    """Launch an api server from the specified port and IP."""
    LOG.debug(_("Launching %s api on %s:%s"), section, host, port)
    app = deploy.loadapp('config:%s' % paste_config_file, name=section)
    server.start(app, int(port), host)


def run_app(paste_config_file):
    LOG.debug(_("Using paste.deploy config at: %s"), configfile)
    config = load_configuration(paste_config_file)
    LOG.debug(_("Configuration: %r"), config)
    server = wsgi.Server()
    ip = config.get('host', '0.0.0.0')
    for api in API_ENDPOINTS:
        port = config.get("%s_port" % api, None)
        if not port:
            continue
        host = config.get("%s_host" % api, ip)
        launch_api(configfile, api, server, port, host)
    LOG.debug(_("All api servers launched, now waiting"))
    server.wait()


if __name__ == '__main__':
    FLAGS(sys.argv)
    configfiles = ['/etc/nova/nova-api.conf']
    if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
        configfiles.insert(0,
                           os.path.join(possible_topdir, 'etc', 'nova-api.conf'))
    for configfile in configfiles:
        if os.path.exists(configfile):
            run_app(configfile)
            break
        else:
            LOG.debug(_("Skipping missing configuration: %s"), configfile)
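
The script above leans entirely on paste.deploy: each name in API_ENDPOINTS is looked up as a section of the config file, and the matching WSGI app is assembled from the pipeline declared there. A rough usage sketch of the same mechanism in isolation (the config path is an assumption; any absolute path works)::

    from paste import deploy
    from wsgiref import simple_server

    # Build the WSGI app wired up by the [composite:ec2] config section.
    app = deploy.loadapp('config:/etc/nova/nova-api.conf', name='ec2')

    # Any WSGI server can host it; nova-api-paste uses nova.wsgi.Server.
    simple_server.make_server('0.0.0.0', 8773, app).serve_forever()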

68 bin/nova-combined (Executable file)
@@ -0,0 +1,68 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Combined starter script for Nova services."""

import eventlet
eventlet.monkey_patch()

import gettext
import os
import sys

# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                   os.pardir,
                                   os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import api
from nova import flags
from nova import service
from nova import utils
from nova import wsgi


FLAGS = flags.FLAGS
flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host')
flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port')
flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host')


if __name__ == '__main__':
    utils.default_flagfile()
    FLAGS(sys.argv)

    compute = service.Service.create(binary='nova-compute')
    network = service.Service.create(binary='nova-network')
    volume = service.Service.create(binary='nova-volume')
    scheduler = service.Service.create(binary='nova-scheduler')
    #objectstore = service.Service.create(binary='nova-objectstore')

    service.serve(compute, network, volume, scheduler)

    server = wsgi.Server()
    server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host)
    server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host)
    server.wait()

bin/nova-compute
@@ -17,9 +17,10 @@
# License for the specific language governing permissions and limitations
# under the License.

"""
Twistd daemon for the nova compute nodes.
"""
"""Starter script for Nova Compute."""

import eventlet
eventlet.monkey_patch()

import gettext
import os
@@ -36,13 +37,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)

from nova import service
from nova import twistd
from nova import utils


if __name__ == '__main__':
    utils.default_flagfile()
    twistd.serve(__file__)

if __name__ == '__builtin__':
    application = service.Service.create()  # pylint: disable=C0103
    service.serve()
    service.wait()

bin/nova-dhcpbridge
@@ -110,7 +110,6 @@ def main():
        FLAGS.num_networks = 5
        path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            '..',
                                            '_trial_temp',
                                            'nova.sqlite'))
        FLAGS.sql_connection = 'sqlite:///%s' % path
    action = argv[1]

134 bin/nova-manage
@@ -53,6 +53,7 @@
CLI interface for nova management.
"""

import datetime
import gettext
import logging
import os
@@ -72,6 +73,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)

from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import flags
@@ -96,47 +98,43 @@ class VpnCommands(object):
        self.manager = manager.AuthManager()
        self.pipe = pipelib.CloudPipe()

    def list(self):
        """Print a listing of the VPNs for all projects."""
    def list(self, project=None):
        """Print a listing of the VPN data for one or all projects.

        args: [project=all]"""
        print "%-12s\t" % 'project',
        print "%-20s\t" % 'ip:port',
        print "%-20s\t" % 'private_ip',
        print "%s" % 'state'
        for project in self.manager.get_projects():
        if project:
            projects = [self.manager.get_project(project)]
        else:
            projects = self.manager.get_projects()
        # NOTE(vish): This hits the database a lot. We could optimize
        #             by getting all networks in one query and all vpns
        #             in another query, then doing lookups by project
        for project in projects:
            print "%-12s\t" % project.name,

            try:
                s = "%s:%s" % (project.vpn_ip, project.vpn_port)
            except exception.NotFound:
                s = "None"
            print "%-20s\t" % s,

            vpn = self._vpn_for(project.id)
            ipport = "%s:%s" % (project.vpn_ip, project.vpn_port)
            print "%-20s\t" % ipport,
            ctxt = context.get_admin_context()
            vpn = db.instance_get_project_vpn(ctxt, project.id)
            if vpn:
                command = "ping -c1 -w1 %s > /dev/null; echo $?"
                out, _err = utils.execute(command % vpn['private_dns_name'],
                                          check_exit_code=False)
                if out.strip() == '0':
                    net = 'up'
                else:
                    net = 'down'
                print vpn['private_dns_name'],
                print vpn['node_name'],
                print vpn['instance_id'],
                address = None
                state = 'down'
                if vpn.get('fixed_ip', None):
                    address = vpn['fixed_ip']['address']
                if project.vpn_ip and utils.vpn_ping(project.vpn_ip,
                                                     project.vpn_port):
                    state = 'up'
                print address,
                print vpn['host'],
                print vpn['ec2_id'],
                print vpn['state_description'],
                print net

                print state
            else:
                print None

    def _vpn_for(self, project_id):
        """Get the VPN instance for a project ID."""
        for instance in db.instance_get_all(context.get_admin_context()):
            if (instance['image_id'] == FLAGS.vpn_image_id
                and not instance['state_description'] in
                    ['shutting_down', 'shutdown']
                and instance['project_id'] == project_id):
                return instance

    def spawn(self):
        """Run all VPNs."""
        for p in reversed(self.manager.get_projects()):
@@ -149,6 +147,21 @@ class VpnCommands(object):
        """Start the VPN for a given project."""
        self.pipe.launch_vpn_instance(project_id)

    def change(self, project_id, ip, port):
        """Change the ip and port for a vpn.

        args: project, ip, port"""
        project = self.manager.get_project(project_id)
        if not project:
            print 'No project %s' % (project_id)
            return
        admin = context.get_admin_context()
        network_ref = db.project_get_network(admin, project_id)
        db.network_update(admin,
                          network_ref['id'],
                          {'vpn_public_address': ip,
                           'vpn_public_port': int(port)})


class ShellCommands(object):
    def bpython(self):
@@ -295,6 +308,14 @@ class UserCommands(object):
            is_admin = False
        self.manager.modify_user(name, access_key, secret_key, is_admin)

    def revoke(self, user_id, project_id=None):
        """revoke certs for a user
        arguments: user_id [project_id]"""
        if project_id:
            crypto.revoke_certs_by_user_and_project(user_id, project_id)
        else:
            crypto.revoke_certs_by_user(user_id)


class ProjectCommands(object):
    """Class for managing projects."""
@@ -436,6 +457,52 @@ class NetworkCommands(object):


class ServiceCommands(object):
    """Enable and disable running services"""

    def list(self, host=None, service=None):
        """Show a list of all running services. Filter by host & service name.
        args: [host] [service]"""
        ctxt = context.get_admin_context()
        now = datetime.datetime.utcnow()
        services = db.service_get_all(ctxt)
        if host:
            services = [s for s in services if s['host'] == host]
        if service:
            services = [s for s in services if s['binary'] == service]
        for svc in services:
            delta = now - (svc['updated_at'] or svc['created_at'])
            alive = (delta.seconds <= 15)
            art = (alive and ":-)") or "XXX"
            active = 'enabled'
            if svc['disabled']:
                active = 'disabled'
            print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
                                              active, art,
                                              svc['updated_at'])

    def enable(self, host, service):
        """Enable scheduling for a service
        args: host service"""
        ctxt = context.get_admin_context()
        svc = db.service_get_by_args(ctxt, host, service)
        if not svc:
            print "Unable to find service"
            return
        db.service_update(ctxt, svc['id'], {'disabled': False})

    def disable(self, host, service):
        """Disable scheduling for a service
        args: host service"""
        ctxt = context.get_admin_context()
        svc = db.service_get_by_args(ctxt, host, service)
        if not svc:
            print "Unable to find service"
            return
        db.service_update(ctxt, svc['id'], {'disabled': True})


CATEGORIES = [
    ('user', UserCommands),
    ('project', ProjectCommands),
@@ -443,7 +510,8 @@ CATEGORIES = [
    ('shell', ShellCommands),
    ('vpn', VpnCommands),
    ('floating', FloatingIpCommands),
    ('network', NetworkCommands)]
    ('network', NetworkCommands),
    ('service', ServiceCommands)]


def lazy_match(name, key_value_tuples):
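
The service liveness test above reduces to comparing the last heartbeat timestamp with utcnow() under a 15-second cutoff. A standalone sketch of that check (the cutoff mirrors the code above; one quirk worth knowing is that timedelta.seconds ignores the days component of the difference)::

    import datetime


    def is_alive(updated_at, created_at=None, cutoff=15):
        # Same expression nova-manage uses: fall back to created_at
        # when a service has never reported a heartbeat.
        delta = datetime.datetime.utcnow() - (updated_at or created_at)
        return delta.seconds <= cutoff

    # A heartbeat from this instant reads as alive.
    print is_alive(datetime.datetime.utcnow())  # True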

bin/nova-network
@@ -17,9 +17,10 @@
# License for the specific language governing permissions and limitations
# under the License.

"""
Twistd daemon for the nova network nodes.
"""
"""Starter script for Nova Network."""

import eventlet
eventlet.monkey_patch()

import gettext
import os
@@ -36,13 +37,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)

from nova import service
from nova import twistd
from nova import utils


if __name__ == '__main__':
    utils.default_flagfile()
    twistd.serve(__file__)

if __name__ == '__builtin__':
    application = service.Service.create()  # pylint: disable-msg=C0103
    service.serve()
    service.wait()

bin/nova-scheduler
@@ -17,9 +17,10 @@
# License for the specific language governing permissions and limitations
# under the License.

"""
Twistd daemon for the nova scheduler nodes.
"""
"""Starter script for Nova Scheduler."""

import eventlet
eventlet.monkey_patch()

import gettext
import os
@@ -36,13 +37,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)

from nova import service
from nova import twistd
from nova import utils


if __name__ == '__main__':
    utils.default_flagfile()
    twistd.serve(__file__)

if __name__ == '__builtin__':
    application = service.Service.create()
    service.serve()
    service.wait()

bin/nova-volume
@@ -17,9 +17,10 @@
# License for the specific language governing permissions and limitations
# under the License.

"""
Twistd daemon for the nova volume nodes.
"""
"""Starter script for Nova Volume."""

import eventlet
eventlet.monkey_patch()

import gettext
import os
@@ -36,13 +37,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)

from nova import service
from nova import twistd
from nova import utils


if __name__ == '__main__':
    utils.default_flagfile()
    twistd.serve(__file__)

if __name__ == '__builtin__':
    application = service.Service.create()  # pylint: disable-msg=C0103
    service.serve()
    service.wait()

@@ -15,7 +15,7 @@ if [ ! -n "$HOST_IP" ]; then
    # NOTE(vish): This will just get the first ip in the list, so if you
    #             have more than one eth device set up, this will fail, and
    #             you should explicitly set HOST_IP in your environment
    HOST_IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
    HOST_IP=`LC_ALL=C ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
fi

USE_MYSQL=${USE_MYSQL:-0}

@@ -30,6 +30,8 @@ if [ -f /etc/default/nova-iptables ] ; then
    . /etc/default/nova-iptables
fi

export LC_ALL=C

API_PORT=${API_PORT:-"8773"}

if [ ! -n "$IP" ]; then
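
The LC_ALL=C additions in these two scripts pin ifconfig's output to the C locale so the 'inet addr:' grep keeps matching on localized systems. Purely for comparison — not part of this commit — a locale-independent way to discover the default outbound address in Python::

    import socket


    def default_host_ip():
        # A UDP connect sends no packets; it only asks the kernel which
        # local address would be used to route to the given destination.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(('8.8.8.8', 80))
            return s.getsockname()[0]
        finally:
            s.close()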

@@ -1,5 +1,8 @@
import gettext
import os

gettext.install('nova')

from nova import utils

def setup(app):

@@ -16,13 +16,13 @@ Here's a script you can use to install (and then run) Nova on Ubuntu or Debian (
Step 2: Install dependencies
----------------------------

Nova requires rabbitmq for messaging and optionally you can use redis for storing state, so install these first.
Nova requires rabbitmq for messaging, so install that first.

*Note:* You must have sudo installed to run these commands as shown here.

::

    sudo apt-get install rabbitmq-server redis-server
    sudo apt-get install rabbitmq-server

You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.

@@ -31,11 +31,10 @@ If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gfl
::

    sudo apt-get install python-twisted

    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 95C71FE2
    sudo sh -c 'echo "deb http://ppa.launchpad.net/openstack/openstack-ppa/ubuntu lucid main" > /etc/apt/sources.list.d/openstackppa.list'
    sudo apt-get update && sudo apt-get install python-gflags
    sudo apt-get install python-software-properties
    sudo add-apt-repository ppa:nova-core/trunk
    sudo apt-get update
    sudo apt-get install python-twisted python-gflags

Once you've done this, continue at Step 3 here: :doc:`../single.node.install`

@@ -76,11 +76,11 @@ External unix tools that are required:
* aoetools and vblade-persist (if you use aoe-volumes)

Nova uses cutting-edge versions of many packages. There are ubuntu packages in
the nova-core ppa. You can add this ppa to your sources list on an ubuntu
machine with the following commands::
the nova-core trunk ppa. You can add this ppa to your sources list on an
ubuntu machine with the following commands::

    sudo apt-get install -y python-software-properties
    sudo add-apt-repository ppa:nova-core/ppa
    sudo add-apt-repository ppa:nova-core/trunk

Recommended
-----------

@@ -46,12 +46,12 @@ Assumptions
Step 1 Use apt-get to get the latest code
-----------------------------------------

1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/ppa.
1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk.

::

    sudo apt-get install python-software-properties
    sudo add-apt-repository ppa:nova-core/ppa
    sudo add-apt-repository ppa:nova-core/trunk

2. Run update.

@@ -77,21 +77,20 @@ Nova development has consolidated all .conf files to nova.conf as of November 20

#. These need to be defined in the nova.conf configuration file::

    --sql_connection=mysql://root:nova@$CC_ADDR/nova  # location of nova sql db
    --s3_host=$CC_ADDR      # This is where nova is hosting the objectstore service, which
                            # will contain the VM images and buckets
    --rabbit_host=$CC_ADDR  # This is where the rabbit AMQP messaging service is hosted
    --cc_host=$CC_ADDR      # This is where the nova-api service lives
    --verbose               # Optional but very helpful during initial setup
    --ec2_url=http://$CC_ADDR:8773/services/Cloud
    --network_manager=nova.network.manager.FlatManager  # simple, no-vlan networking type

    --fixed_range=<network/prefix>  # ip network to use for VM guests, ex 192.168.2.64/26
    --network_size=<# of addrs>     # number of ip addrs to use for VM guests, ex 64
    --sql_connection=mysql://root:nova@$CC_ADDR/nova  # location of nova sql db
    --s3_host=$CC_ADDR      # This is where Nova is hosting the objectstore service, which
                            # will contain the VM images and buckets
    --rabbit_host=$CC_ADDR  # This is where the rabbit AMQP messaging service is hosted
    --cc_host=$CC_ADDR      # This is where the nova-api service lives
    --verbose               # Optional but very helpful during initial setup
    --ec2_url=http://$CC_ADDR:8773/services/Cloud
    --network_manager=nova.network.manager.FlatManager  # simple, no-vlan networking type
    --fixed_range=<network/prefix>  # ip network to use for VM guests, ex 192.168.2.64/26
    --network_size=<# of addrs>     # number of ip addrs to use for VM guests, ex 64

#. Create a nova group::

    sudo addgroup nova
    sudo addgroup nova

The Nova config file should have its owner set to root:nova, and mode set to 0640, since it contains your MySQL server's root password.

@@ -24,7 +24,7 @@ Routing

To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information.

URLs are mapped to "action" methods on "controller" classes in nova/api/openstack/__init__/ApiRouter.__init__ .
URLs are mapped to "action" methods on "controller" classes in `nova/api/openstack/__init__/ApiRouter.__init__` .

See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two:
- mapper.connect() lets you map a single URL to a single action on a controller.

@@ -33,9 +33,9 @@ See http://routes.groovie.org/manual.html for all syntax, but you'll probably ju
Controllers and actions
-----------------------

Controllers live in nova/api/openstack, and inherit from nova.wsgi.Controller.
Controllers live in `nova/api/openstack`, and inherit from nova.wsgi.Controller.

See nova/api/openstack/servers.py for an example.
See `nova/api/openstack/servers.py` for an example.

Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc.

@@ -46,7 +46,7 @@ Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML

If you define a new controller, you'll need to define a _serialization_metadata attribute on the class, to tell wsgi.Controller how to convert your dictionary to XML. It needs to know the singular form of any list tag (e.g. <servers> list contains <server> tags) and which dictionary keys are to be XML attributes as opposed to subtags (e.g. <server id="4"/> instead of <server><id>4</id></server>).

See nova/api/openstack/servers.py for an example.
See `nova/api/openstack/servers.py` for an example.

Faults
------

@@ -71,8 +71,8 @@ RPC Casts

The diagram below shows the message flow during an rpc.cast operation:

1. a Topic Publisher is instantiated to send the message request to the queuing system.
2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task.
1. A Topic Publisher is instantiated to send the message request to the queuing system.
2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task.

.. image:: /images/rabbit/flow2.png
   :width: 60%

@@ -75,7 +75,7 @@ Nova is built on a shared-nothing, messaging-based architecture. All of the majo

To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)

.. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>_`.
.. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>`_.

Concept: Storage
----------------

@@ -129,12 +129,12 @@ The simplest networking mode. Each instance receives a fixed ip from the pool.
Flat DHCP Mode
~~~~~~~~~~~~~~

This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.
This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode Nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.

VLAN DHCP Mode
~~~~~~~~~~~~~~

This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, Nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.

The following diagram illustrates the communication that occurs between the vlan (the dashed box) and the public internet (represented by the two clouds):

@@ -154,16 +154,16 @@ Concept: nova-manage
--------------------

The nova-manage command is used to perform many essential functions for
administration and ongoing maintenance of nova, such as user creation,
administration and ongoing maintenance of Nova, such as user creation,
vpn management, and much more.

See doc:`nova.manage` in the Administration Guide for more details.
See :doc:`nova.manage` in the Administration Guide for more details.


Concept: Flags
--------------

Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.


Concept: Plugins

@@ -181,7 +181,7 @@ Concept: Plugins
Concept: IPC/RPC
----------------

Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.
Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various Nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.

Concept: Fakes
--------------

@@ -59,38 +59,21 @@ different configurations (though for more complex setups you should see
* HOST_IP
  * Default: address of first interface from the ifconfig command
  * Values: 127.0.0.1, or any other valid address

TEST
~~~~

**Default**: 0
**Values**: 1, run tests after checkout and initial setup

USE_MYSQL
~~~~~~~~~

**Default**: 0, use sqlite3
**Values**: 1, use mysql instead of sqlite3

MYSQL_PASS
~~~~~~~~~~

Only useful if $USE_MYSQL=1.

**Default**: nova
**Values**: value of root password for mysql

USE_LDAP
~~~~~~~~

**Default**: 0, use :mod:`nova.auth.dbdriver`
**Values**: 1, use :mod:`nova.auth.ldapdriver`

LIBVIRT_TYPE
~~~~~~~~~~~~

**Default**: qemu
**Values**: uml, kvm
* TEST
  * Default: 0
  * Values: 1, run tests after checkout and initial setup
* USE_MYSQL
  * Default: 0, use sqlite3
  * Values: 1, use mysql instead of sqlite3
* MYSQL_PASS (Only useful if $USE_MYSQL=1)
  * Default: nova
  * Values: value of root password for mysql
* USE_LDAP
  * Default: 0, use :mod:`nova.auth.dbdriver`
  * Values: 1, use :mod:`nova.auth.ldapdriver`
* LIBVIRT_TYPE
  * Default: qemu
  * Values: uml, kvm

Usage
-----

63 etc/nova-api.conf (Normal file)
@@ -0,0 +1,63 @@
[DEFAULT]
verbose = 1
ec2_port = 8773
ec2_address = 0.0.0.0
openstack_port = 8774
openstack_address = 0.0.0.0

#######
# EC2 #
#######

[composite:ec2]
use = egg:Paste#urlmap
/: ec2versions
/services: ec2api
/latest: ec2metadata
/200: ec2metadata
/1.0: ec2metadata

[pipeline:ec2api]
pipeline = authenticate router authorizer ec2executor

[filter:authenticate]
paste.filter_factory = nova.api.ec2:authenticate_factory

[filter:router]
paste.filter_factory = nova.api.ec2:router_factory

[filter:authorizer]
paste.filter_factory = nova.api.ec2:authorizer_factory

[app:ec2executor]
paste.app_factory = nova.api.ec2:executor_factory

[app:ec2versions]
paste.app_factory = nova.api.ec2:versions_factory

[app:ec2metadata]
paste.app_factory = nova.api.ec2.metadatarequesthandler:metadata_factory

#############
# Openstack #
#############

[composite:openstack]
use = egg:Paste#urlmap
/: osversions
/v1.0: openstackapi

[pipeline:openstackapi]
pipeline = auth ratelimit osapi

[filter:auth]
paste.filter_factory = nova.api.openstack.auth:auth_factory

[filter:ratelimit]
paste.filter_factory = nova.api.openstack.ratelimiting:ratelimit_factory

[app:osapi]
paste.app_factory = nova.api.openstack:router_factory

[app:osversions]
paste.app_factory = nova.api.openstack:versions_factory
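
Each [pipeline:...] section above is declarative middleware stacking: paste.deploy wraps the terminal app with the listed filters from right to left. Expressed directly in Python — the same shape the EC2 API class builds by hand later in this commit, Authenticate(Router(Authorizer(Executor()))) — the mechanism is just::

    def build_pipeline(app, *filters):
        # Apply filters right-to-left, as paste.deploy does for a pipeline.
        for make_filter in reversed(filters):
            app = make_filter(app)
        return app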

@@ -194,6 +194,7 @@ class HostInfo(object):


class NovaAdminClient(object):

    def __init__(
            self,
            clc_url=DEFAULT_CLC_URL,

@@ -24,14 +24,13 @@ Root WSGI middleware for all API controllers.
:ec2api_subdomain: subdomain running the EC2 API (default: ec2)

"""
import logging

import routes
import webob.dec

from nova import flags
from nova import utils
from nova import wsgi
from nova.api import cloudpipe
from nova.api import ec2
from nova.api import openstack
from nova.api.ec2 import metadatarequesthandler
@@ -41,6 +40,7 @@ flags.DEFINE_string('osapi_subdomain', 'api',
                    'subdomain running the OpenStack API')
flags.DEFINE_string('ec2api_subdomain', 'ec2',
                    'subdomain running the EC2 API')

FLAGS = flags.FLAGS


@@ -80,7 +80,6 @@ class API(wsgi.Router):
        mapper.connect('%s/{path_info:.*}' % s, controller=mrh,
                       conditions=ec2api_subdomain)

        mapper.connect("/cloudpipe/{path_info:.*}", controller=cloudpipe.API())
        super(API, self).__init__(mapper)

    @webob.dec.wsgify

@@ -1,69 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
REST API Request Handlers for CloudPipe
"""

import logging
import urllib
import webob
import webob.dec
import webob.exc

from nova import crypto
from nova import wsgi
from nova.auth import manager
from nova.api.ec2 import cloud


_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)


class API(wsgi.Application):

    def __init__(self):
        self.controller = cloud.CloudController()

    @webob.dec.wsgify
    def __call__(self, req):
        if req.method == 'POST':
            return self.sign_csr(req)
        _log.debug("Cloudpipe path is %s" % req.path_info)
        if req.path_info.endswith("/getca/"):
            return self.send_root_ca(req)
        return webob.exc.HTTPNotFound()

    def get_project_id_from_ip(self, ip):
        # TODO(eday): This was removed with the ORM branch, fix!
        instance = self.controller.get_instance_by_ip(ip)
        return instance['project_id']

    def send_root_ca(self, req):
        _log.debug("Getting root ca")
        project_id = self.get_project_id_from_ip(req.remote_addr)
        res = webob.Response()
        res.headers["Content-Type"] = "text/plain"
        res.body = crypto.fetch_ca(project_id)
        return res

    def sign_csr(self, req):
        project_id = self.get_project_id_from_ip(req.remote_addr)
        cert = self.str_params['cert']
        return crypto.sign_csr(urllib.unquote(cert), project_id)

@@ -26,8 +26,8 @@ import webob
import webob.dec
import webob.exc

from nova import exception
from nova import context
from nova import exception
from nova import flags
from nova import wsgi
from nova.api.ec2 import apirequest
@@ -37,16 +37,82 @@ from nova.auth import manager


FLAGS = flags.FLAGS
flags.DEFINE_boolean('use_forwarded_for', False,
                     'Treat X-Forwarded-For as the canonical remote address. '
                     'Only enable this if you have a sanitizing proxy.')
flags.DEFINE_boolean('use_lockout', False,
                     'Whether or not to use lockout middleware.')
flags.DEFINE_integer('lockout_attempts', 5,
                     'Number of failed auths before lockout.')
flags.DEFINE_integer('lockout_minutes', 15,
                     'Number of minutes to lockout if triggered.')
flags.DEFINE_integer('lockout_window', 15,
                     'Number of minutes for lockout window.')
flags.DEFINE_list('lockout_memcached_servers', None,
                  'Memcached servers or None for in process cache.')


_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)


class API(wsgi.Middleware):

    """Routing for all EC2 API requests."""

    def __init__(self):
        self.application = Authenticate(Router(Authorizer(Executor())))
        if FLAGS.use_lockout:
            self.application = Lockout(self.application)


class Lockout(wsgi.Middleware):
    """Lockout for x minutes on y failed auths in a z minute period.

    x = lockout_minutes flag
    y = lockout_attempts flag
    z = lockout_window flag

    Uses memcached if lockout_memcached_servers flag is set, otherwise it
    uses a very simple in-process cache. Due to the simplicity of
    the implementation, the timeout window is started with the first
    failed request, so it will block if there are x failed logins within
    that period.

    There is a possible race condition where simultaneous requests could
    sneak in before the lockout hits, but this is extremely rare and would
    only result in a couple of extra failed attempts."""

    def __init__(self, application):
        """middleware can use fake for testing."""
        if FLAGS.lockout_memcached_servers:
            import memcache
        else:
            from nova import fakememcache as memcache
        self.mc = memcache.Client(FLAGS.lockout_memcached_servers,
                                  debug=0)
        super(Lockout, self).__init__(application)

    @webob.dec.wsgify
    def __call__(self, req):
        access_key = str(req.params['AWSAccessKeyId'])
        failures_key = "authfailures-%s" % access_key
        failures = int(self.mc.get(failures_key) or 0)
        if failures >= FLAGS.lockout_attempts:
            detail = "Too many failed authentications."
            raise webob.exc.HTTPForbidden(detail=detail)
        res = req.get_response(self.application)
        if res.status_int == 403:
            failures = self.mc.incr(failures_key)
            if failures is None:
                # NOTE(vish): To use incr, failures has to be a string.
                self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
            elif failures >= FLAGS.lockout_attempts:
                _log.warn('Access key %s has had %d failed authentications'
                          ' and will be locked out for %d minutes.' %
                          (access_key, failures, FLAGS.lockout_minutes))
                self.mc.set(failures_key, str(failures),
                            time=FLAGS.lockout_minutes * 60)
        return res


class Authenticate(wsgi.Middleware):
@@ -77,13 +143,16 @@ class Authenticate(wsgi.Middleware):
                req.host,
                req.path)
        except exception.Error, ex:
            logging.debug("Authentication Failure: %s" % ex)
            logging.debug(_("Authentication Failure: %s") % ex)
            raise webob.exc.HTTPForbidden()

        # Authenticated!
        remote_address = req.remote_addr
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctxt = context.RequestContext(user=user,
                                      project=project,
                                      remote_address=req.remote_addr)
                                      remote_address=remote_address)
        req.environ['ec2.context'] = ctxt
        return self.application

@@ -120,9 +189,9 @@ class Router(wsgi.Middleware):
        except:
            raise webob.exc.HTTPBadRequest()

        _log.debug('action: %s' % action)
        _log.debug(_('action: %s') % action)
        for key, value in args.items():
            _log.debug('arg: %s\t\tval: %s' % (key, value))
            _log.debug(_('arg: %s\t\tval: %s') % (key, value))

        # Success!
        req.environ['ec2.controller'] = controller
@@ -225,10 +294,9 @@ class Executor(wsgi.Application):
        args = req.environ['ec2.action_args']

        api_request = apirequest.APIRequest(controller, action)
        result = None
        try:
            result = api_request.send(context, **args)
            req.headers['Content-Type'] = 'text/xml'
            return result
        except exception.ApiError as ex:

            if ex.code:
@@ -238,6 +306,12 @@ class Executor(wsgi.Application):
        # TODO(vish): do something more useful with unknown exceptions
        except Exception as ex:
            return self._error(req, type(ex).__name__, str(ex))
        else:
            resp = webob.Response()
            resp.status = 200
            resp.headers['Content-Type'] = 'text/xml'
            resp.body = str(result)
            return resp

    def _error(self, req, code, message):
        logging.error("%s: %s", code, message)
@@ -249,3 +323,49 @@ class Executor(wsgi.Application):
                  '<Message>%s</Message></Error></Errors>'
                  '<RequestID>?</RequestID></Response>' % (code, message))
        return resp


class Versions(wsgi.Application):

    @webob.dec.wsgify
    def __call__(self, req):
        """Respond to a request for all EC2 versions."""
        # available api versions
        versions = [
            '1.0',
            '2007-01-19',
            '2007-03-01',
            '2007-08-29',
            '2007-10-10',
            '2007-12-15',
            '2008-02-01',
            '2008-09-01',
            '2009-04-04',
        ]
        return ''.join('%s\n' % v for v in versions)


def authenticate_factory(global_args, **local_args):
    def authenticator(app):
        return Authenticate(app)
    return authenticator


def router_factory(global_args, **local_args):
    def router(app):
        return Router(app)
    return router


def authorizer_factory(global_args, **local_args):
    def authorizer(app):
        return Authorizer(app)
    return authorizer


def executor_factory(global_args, **local_args):
    return Executor()


def versions_factory(global_args, **local_args):
    return Versions()

@@ -168,6 +168,7 @@ class AdminController(object):

    # FIXME(vish): these host commands don't work yet, perhaps some of the
    #              required data can be retrieved from service objects?

    def describe_hosts(self, _context, **_kwargs):
        """Returns status info for all nodes. Includes:
            * Disk Space

@@ -92,8 +92,8 @@ class APIRequest(object):
            method = getattr(self.controller,
                             _camelcase_to_underscore(self.action))
        except AttributeError:
            _error = ('Unsupported API request: controller = %s,'
                      'action = %s') % (self.controller, self.action)
            _error = _('Unsupported API request: controller = %s,'
                       'action = %s') % (self.controller, self.action)
            _log.warning(_error)
            # TODO: Raise custom exception, trap in apiserver,
            #       and reraise as 400 error.
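
The _camelcase_to_underscore call above is what lets EC2 action names like DescribeRegions dispatch to controller methods like describe_regions. Its body lies outside this hunk; a conventional sketch of such a helper (an assumption about the implementation, shown only to make the mapping concrete)::

    import re


    def camelcase_to_underscore(name):
        # 'DescribeAvailabilityZones' -> 'describe_availability_zones'
        return re.sub(r'(.)([A-Z])', r'\1_\2', name).lower()

    print camelcase_to_underscore('DescribeRegions')  # describe_regions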
@ -27,7 +27,6 @@ import datetime
|
||||
import logging
|
||||
import re
|
||||
import os
|
||||
import time
|
||||
|
||||
from nova import context
|
||||
import IPy
|
||||
@ -114,7 +113,7 @@ class CloudController(object):
|
||||
start = os.getcwd()
|
||||
os.chdir(FLAGS.ca_path)
|
||||
# TODO(vish): Do this with M2Crypto instead
|
||||
utils.runthis("Generating root CA: %s", "sh genrootca.sh")
|
||||
utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh")
|
||||
os.chdir(start)
|
||||
|
||||
def _get_mpi_data(self, context, project_id):
|
||||
@ -189,22 +188,63 @@ class CloudController(object):
|
||||
return data
|
||||
|
||||
def describe_availability_zones(self, context, **kwargs):
|
||||
if ('zone_name' in kwargs and
|
||||
'verbose' in kwargs['zone_name'] and
|
||||
context.is_admin):
|
||||
return self._describe_availability_zones_verbose(context,
|
||||
**kwargs)
|
||||
else:
|
||||
return self._describe_availability_zones(context, **kwargs)
|
||||
|
||||
def _describe_availability_zones(self, context, **kwargs):
|
||||
return {'availabilityZoneInfo': [{'zoneName': 'nova',
|
||||
'zoneState': 'available'}]}
|
||||
|
||||
def _describe_availability_zones_verbose(self, context, **kwargs):
|
||||
rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
|
||||
'zoneState': 'available'}]}
|
||||
|
||||
services = db.service_get_all(context)
|
||||
now = db.get_time()
|
||||
hosts = []
|
||||
for host in [service['host'] for service in services]:
|
||||
if not host in hosts:
|
||||
hosts.append(host)
|
||||
for host in hosts:
|
||||
rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host,
|
||||
'zoneState': ''})
|
||||
hsvcs = [service for service in services \
|
||||
if service['host'] == host]
|
||||
for svc in hsvcs:
|
||||
delta = now - (svc['updated_at'] or svc['created_at'])
|
||||
alive = (delta.seconds <= FLAGS.service_down_time)
|
||||
art = (alive and ":-)") or "XXX"
|
||||
active = 'enabled'
|
||||
if svc['disabled']:
|
||||
active = 'disabled'
|
||||
rv['availabilityZoneInfo'].append({
|
||||
'zoneName': '| |- %s' % svc['binary'],
|
||||
'zoneState': '%s %s %s' % (active, art,
|
||||
svc['updated_at'])})
|
||||
return rv
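One subtlety in the liveness check above: timedelta.seconds is only the seconds component of the delta (0-86399), not the full elapsed time, so a service silent for a day plus a few seconds could still read as alive. A defensive variant, sketched for the Python 2 era of this code where timedelta.total_seconds() may not be available:

import datetime

def elapsed_seconds(now, then):
    # fold the days component in rather than reading .seconds alone
    delta = now - then
    return delta.days * 86400 + delta.seconds

now = datetime.datetime(2010, 12, 2, 0, 0, 5)
then = datetime.datetime(2010, 12, 1, 0, 0, 0)
print elapsed_seconds(now, then)  # 86405, while (now - then).seconds is just 5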

    def describe_regions(self, context, region_name=None, **kwargs):
        if FLAGS.region_list:
            regions = []
            for region in FLAGS.region_list:
                name, _sep, url = region.partition('=')
                name, _sep, host = region.partition('=')
                endpoint = '%s://%s:%s%s' % (FLAGS.ec2_prefix,
                                             host,
                                             FLAGS.cc_port,
                                             FLAGS.ec2_suffix)
                regions.append({'regionName': name,
                                'regionEndpoint': url})
                                'regionEndpoint': endpoint})
        else:
            regions = [{'regionName': 'nova',
                        'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
                                                            FLAGS.cc_host,
                                                            FLAGS.cc_port,
                                                            FLAGS.ec2_suffix)}]
                        'regionEndpoint': FLAGS.ec2_url}]
        if region_name:
            regions = [r for r in regions if r['regionName'] in region_name]
        return {'regionInfo': regions}
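For illustration, a --region_list entry such as east=api1.example.com (hostname invented) parses as above into a name and a host; with assumed flag values for prefix, port, and suffix the interpolation yields an endpoint like this:

region = 'east=api1.example.com'
name, _sep, host = region.partition('=')
# assuming ec2_prefix='http', cc_port=8773, ec2_suffix='/services/Cloud'
endpoint = '%s://%s:%s%s' % ('http', host, 8773, '/services/Cloud')
print name, endpoint  # east http://api1.example.com:8773/services/Cloud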

    def describe_snapshots(self,
                           context,

@ -319,11 +359,11 @@ class CloudController(object):
        ip_protocol = str(ip_protocol)

        if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
            raise InvalidInputException('%s is not a valid ipProtocol' %
            raise InvalidInputException(_('%s is not a valid ipProtocol') %
                                        (ip_protocol,))
        if ((min(from_port, to_port) < -1) or
            (max(from_port, to_port) > 65535)):
            raise InvalidInputException('Invalid port range')
            raise InvalidInputException(_('Invalid port range'))

        values['protocol'] = ip_protocol
        values['from_port'] = from_port

@ -361,7 +401,8 @@ class CloudController(object):

        criteria = self._revoke_rule_args_to_dict(context, **kwargs)
        if criteria == None:
            raise exception.ApiError("No rule for the specified parameters.")
            raise exception.ApiError(_("No rule for the specified "
                                       "parameters."))

        for rule in security_group.rules:
            match = True

@ -372,7 +413,7 @@ class CloudController(object):
                db.security_group_rule_destroy(context, rule['id'])
                self._trigger_refresh_security_group(context, security_group)
                return True
        raise exception.ApiError("No rule for the specified parameters.")
        raise exception.ApiError(_("No rule for the specified parameters."))

    # TODO(soren): This has only been tested with Boto as the client.
    #              Unfortunately, it seems Boto is using an old API

@ -388,8 +429,8 @@ class CloudController(object):
        values['parent_group_id'] = security_group.id

        if self._security_group_rule_exists(security_group, values):
            raise exception.ApiError('This rule already exists in group %s' %
                                     group_name)
            raise exception.ApiError(_('This rule already exists in group %s')
                                     % group_name)

        security_group_rule = db.security_group_rule_create(context, values)

@ -417,7 +458,7 @@ class CloudController(object):
    def create_security_group(self, context, group_name, group_description):
        self.compute_api.ensure_default_security_group(context)
        if db.security_group_exists(context, context.project_id, group_name):
            raise exception.ApiError('group %s already exists' % group_name)
            raise exception.ApiError(_('group %s already exists') % group_name)

        group = {'user_id': context.user.id,
                 'project_id': context.project_id,

@ -530,13 +571,13 @@ class CloudController(object):
    def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
        volume_ref = db.volume_get_by_ec2_id(context, volume_id)
        if not re.match("^/dev/[a-z]d[a-z]+$", device):
            raise exception.ApiError("Invalid device specified: %s. "
                                     "Example device: /dev/vdb" % device)
            raise exception.ApiError(_("Invalid device specified: %s. "
                                       "Example device: /dev/vdb") % device)
        # TODO(vish): abstract status checking?
        if volume_ref['status'] != "available":
            raise exception.ApiError("Volume status must be available")
            raise exception.ApiError(_("Volume status must be available"))
        if volume_ref['attach_status'] == "attached":
            raise exception.ApiError("Volume is already attached")
            raise exception.ApiError(_("Volume is already attached"))
        internal_id = ec2_id_to_internal_id(instance_id)
        instance_ref = self.compute_api.get_instance(context, internal_id)
        host = instance_ref['host']
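The device check above accepts only one-letter-then-d-then-letters names, i.e. the common Linux virtio/SCSI/IDE shapes. A quick illustration of what the pattern admits and rejects:

import re

pattern = re.compile("^/dev/[a-z]d[a-z]+$")
for device in ['/dev/vdb', '/dev/sdc', '/dev/xvda', '/dev/vdb1', '/dev/vd']:
    print device, bool(pattern.match(device))
# /dev/vdb True, /dev/sdc True, /dev/xvda False (two letters before 'd'),
# /dev/vdb1 False (trailing digit), /dev/vd False (nothing after 'd')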

@ -558,10 +599,10 @@ class CloudController(object):
        instance_ref = db.volume_get_instance(context.elevated(),
                                              volume_ref['id'])
        if not instance_ref:
            raise exception.ApiError("Volume isn't attached to anything!")
            raise exception.ApiError(_("Volume isn't attached to anything!"))
        # TODO(vish): abstract status checking?
        if volume_ref['status'] == "available":
            raise exception.ApiError("Volume is already detached")
            raise exception.ApiError(_("Volume is already detached"))
        try:
            host = instance_ref['host']
            rpc.cast(context,

@ -696,23 +737,29 @@ class CloudController(object):
    def allocate_address(self, context, **kwargs):
        # check quota
        if quota.allowed_floating_ips(context, 1) < 1:
            logging.warn("Quota exceeded for %s, tried to allocate address",
            logging.warn(_("Quota exceeded for %s, tried to allocate "
                           "address"),
                         context.project_id)
            raise quota.QuotaError("Address quota exceeded. You cannot "
                                   "allocate any more addresses")
        network_topic = self._get_network_topic(context)
            raise quota.QuotaError(_("Address quota exceeded. You cannot "
                                     "allocate any more addresses"))
        # NOTE(vish): We don't know which network host should get the ip
        #             when we allocate, so just send it to any one.  This
        #             will probably need to move into a network supervisor
        #             at some point.
        public_ip = rpc.call(context,
                             network_topic,
                             FLAGS.network_topic,
                             {"method": "allocate_floating_ip",
                              "args": {"project_id": context.project_id}})
        return {'addressSet': [{'publicIp': public_ip}]}

    def release_address(self, context, public_ip, **kwargs):
        # NOTE(vish): Should we make sure this works?
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = self._get_network_topic(context)
        # NOTE(vish): We don't know which network host should get the ip
        #             when we deallocate, so just send it to any one.  This
        #             will probably need to move into a network supervisor
        #             at some point.
        rpc.cast(context,
                 network_topic,
                 FLAGS.network_topic,
                 {"method": "deallocate_floating_ip",
                  "args": {"floating_address": floating_ip_ref['address']}})
        return {'releaseResponse': ["Address released."]}

@ -723,7 +770,10 @@ class CloudController(object):
        fixed_address = db.instance_get_fixed_address(context,
                                                      instance_ref['id'])
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = self._get_network_topic(context)
        # NOTE(vish): Perhaps we should just pass this on to compute and
        #             let compute communicate with network.
        network_topic = self.compute_api.get_network_topic(context,
                                                           internal_id)
        rpc.cast(context,
                 network_topic,
                 {"method": "associate_floating_ip",

@ -733,24 +783,18 @@ class CloudController(object):

    def disassociate_address(self, context, public_ip, **kwargs):
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = self._get_network_topic(context)
        # NOTE(vish): Get the topic from the host name of the network of
        #             the associated fixed ip.
        if not floating_ip_ref.get('fixed_ip'):
            raise exception.ApiError('Address is not associated.')
        host = floating_ip_ref['fixed_ip']['network']['host']
        topic = db.queue_get_for(context, FLAGS.network_topic, host)
        rpc.cast(context,
                 network_topic,
                 topic,
                 {"method": "disassociate_floating_ip",
                  "args": {"floating_address": floating_ip_ref['address']}})
        return {'disassociateResponse': ["Address disassociated."]}

    def _get_network_topic(self, context):
        """Retrieves the network host for a project"""
        network_ref = self.network_manager.get_network(context)
        host = network_ref['host']
        if not host:
            host = rpc.call(context,
                            FLAGS.network_topic,
                            {"method": "set_network_host",
                             "args": {"network_id": network_ref['id']}})
        return db.queue_get_for(context, FLAGS.network_topic, host)
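A pattern worth calling out across the address methods above: allocate_address and _get_network_topic use rpc.call, which publishes to a topic and blocks until a worker returns a value (the allocated IP, the chosen host), while the release/disassociate paths use rpc.cast, which publishes and returns immediately. A schematic of that contract as used here; the publish helpers are illustrative placeholders, not nova's rpc internals:

def call(context, topic, msg):
    # send msg and wait for the consumer's return value
    return _publish_and_wait(topic, msg)   # placeholder helper

def cast(context, topic, msg):
    # send msg; fire-and-forget, no return value is collected
    _publish(topic, msg)                   # placeholder helper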

    def run_instances(self, context, **kwargs):
        max_count = int(kwargs.get('max_count', 1))
        instances = self.compute_api.create_instances(context,

@ -758,12 +802,15 @@ class CloudController(object):
            kwargs['image_id'],
            min_count=int(kwargs.get('min_count', max_count)),
            max_count=max_count,
            kernel_id=kwargs.get('kernel_id'),
            kernel_id=kwargs.get('kernel_id', None),
            ramdisk_id=kwargs.get('ramdisk_id'),
            display_name=kwargs.get('display_name'),
            description=kwargs.get('display_description'),
            key_name=kwargs.get('key_name'),
            user_data=kwargs.get('user_data'),
            security_group=kwargs.get('security_group'),
            availability_zone=kwargs.get('placement', {}).get(
                'AvailabilityZone'),
            generate_hostname=internal_id_to_ec2_id)
        return self._format_run_instances(context,
                                          instances[0]['reservation_id'])

@ -812,7 +859,7 @@ class CloudController(object):
        # TODO: return error if not authorized
        volume_ref = db.volume_get_by_ec2_id(context, volume_id)
        if volume_ref['status'] != "available":
            raise exception.ApiError("Volume status must be available")
            raise exception.ApiError(_("Volume status must be available"))
        now = datetime.datetime.utcnow()
        db.volume_update(context, volume_ref['id'], {'status': 'deleting',
                                                     'terminated_at': now})

@ -843,11 +890,12 @@ class CloudController(object):

    def describe_image_attribute(self, context, image_id, attribute, **kwargs):
        if attribute != 'launchPermission':
            raise exception.ApiError('attribute not supported: %s' % attribute)
            raise exception.ApiError(_('attribute not supported: %s')
                                     % attribute)
        try:
            image = self.image_service.show(context, image_id)
        except IndexError:
            raise exception.ApiError('invalid id: %s' % image_id)
            raise exception.ApiError(_('invalid id: %s') % image_id)
        result = {'image_id': image_id, 'launchPermission': []}
        if image['isPublic']:
            result['launchPermission'].append({'group': 'all'})

@ -857,13 +905,14 @@ class CloudController(object):
                               operation_type, **kwargs):
        # TODO(devcamcar): Support users and groups other than 'all'.
        if attribute != 'launchPermission':
            raise exception.ApiError('attribute not supported: %s' % attribute)
            raise exception.ApiError(_('attribute not supported: %s')
                                     % attribute)
        if not 'user_group' in kwargs:
            raise exception.ApiError('user or group not specified')
            raise exception.ApiError(_('user or group not specified'))
        if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
            raise exception.ApiError('only group "all" is supported')
            raise exception.ApiError(_('only group "all" is supported'))
        if not operation_type in ['add', 'remove']:
            raise exception.ApiError('operation_type must be add or remove')
            raise exception.ApiError(_('operation_type must be add or remove'))
        return self.image_service.modify(context, image_id, operation_type)

    def update_image(self, context, image_id, **kwargs):

@ -23,9 +23,13 @@ import logging
import webob.dec
import webob.exc

from nova import flags
from nova.api.ec2 import cloud


FLAGS = flags.FLAGS


class MetadataRequestHandler(object):
    """Serve metadata from the EC2 API."""

@ -63,12 +67,19 @@ class MetadataRequestHandler(object):
    @webob.dec.wsgify
    def __call__(self, req):
        cc = cloud.CloudController()
        meta_data = cc.get_metadata(req.remote_addr)
        remote_address = req.remote_addr
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        meta_data = cc.get_metadata(remote_address)
        if meta_data is None:
            logging.error('Failed to get metadata for ip: %s' %
                          req.remote_addr)
            logging.error(_('Failed to get metadata for ip: %s') %
                          remote_address)
            raise webob.exc.HTTPNotFound()
        data = self.lookup(req.path_info, meta_data)
        if data is None:
            raise webob.exc.HTTPNotFound()
        return self.print_data(data)


def metadata_factory(global_args, **local_args):
    return MetadataRequestHandler()
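One caveat with the use_forwarded_for handling above: X-Forwarded-For can carry a comma-separated proxy chain, and the handler uses the header value verbatim. A hedged sketch of extracting just the originating client; behavior beyond what this diff shows is an assumption:

def client_address(remote_addr, forwarded_for):
    if forwarded_for:
        # the left-most entry is the original client in the usual convention
        return forwarded_for.split(',')[0].strip()
    return remote_addr

print client_address('10.0.0.1', '203.0.113.9, 10.0.0.1')  # 203.0.113.9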

@ -20,7 +20,6 @@
WSGI middleware for OpenStack API controllers.
"""

import json
import time

import logging

@ -41,14 +40,17 @@ from nova.api.openstack import images
from nova.api.openstack import ratelimiting
from nova.api.openstack import servers
from nova.api.openstack import sharedipgroups
from nova.auth import manager


FLAGS = flags.FLAGS
flags.DEFINE_string('nova_api_auth',
                    'nova.api.openstack.auth.BasicApiAuthManager',
flags.DEFINE_string('os_api_auth',
                    'nova.api.openstack.auth.AuthMiddleware',
                    'The auth mechanism to use for the OpenStack API implementation')

flags.DEFINE_string('os_api_ratelimiting',
                    'nova.api.openstack.ratelimiting.RateLimitingMiddleware',
                    'Default ratelimiting implementation for the Openstack API')

flags.DEFINE_bool('allow_admin_api',
                  False,
                  'When True, this API service will accept admin operations.')

@ -58,7 +60,10 @@ class API(wsgi.Middleware):
    """WSGI entry point for all OpenStack API requests."""

    def __init__(self):
        app = AuthMiddleware(RateLimitingMiddleware(APIRouter()))
        auth_middleware = utils.import_class(FLAGS.os_api_auth)
        ratelimiting_middleware = \
            utils.import_class(FLAGS.os_api_ratelimiting)
        app = auth_middleware(ratelimiting_middleware(APIRouter()))
        super(API, self).__init__(app)

    @webob.dec.wsgify
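utils.import_class is what lets the auth and rate-limiting layers be swapped via the two flags above. Its implementation is not part of this diff; a minimal sketch of the usual approach:

import sys

def import_class(import_str):
    """Load a class from a dotted path like 'pkg.module.ClassName'."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)  # ensure the module is loaded
    return getattr(sys.modules[mod_str], class_str)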

@ -66,102 +71,12 @@ class API(wsgi.Middleware):
        try:
            return req.get_response(self.application)
        except Exception as ex:
            logging.warn("Caught error: %s" % str(ex))
            logging.debug(traceback.format_exc())
            logging.warn(_("Caught error: %s") % str(ex))
            logging.error(traceback.format_exc())
            exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
            return faults.Fault(exc)


class AuthMiddleware(wsgi.Middleware):
    """Authorize the openstack API request or return an HTTP Forbidden."""

    def __init__(self, application):
        self.auth_driver = utils.import_class(FLAGS.nova_api_auth)()
        super(AuthMiddleware, self).__init__(application)

    @webob.dec.wsgify
    def __call__(self, req):
        if 'X-Auth-Token' not in req.headers:
            return self.auth_driver.authenticate(req)

        user = self.auth_driver.authorize_token(req.headers["X-Auth-Token"])

        if not user:
            return faults.Fault(webob.exc.HTTPUnauthorized())

        req.environ['nova.context'] = context.RequestContext(user, user)
        return self.application


class RateLimitingMiddleware(wsgi.Middleware):
    """Rate limit incoming requests according to the OpenStack rate limits."""

    def __init__(self, application, service_host=None):
        """Create a rate limiting middleware that wraps the given application.

        By default, rate counters are stored in memory.  If service_host is
        specified, the middleware instead relies on the ratelimiting.WSGIApp
        at the given host+port to keep rate counters.
        """
        super(RateLimitingMiddleware, self).__init__(application)
        if not service_host:
            #TODO(gundlach): These limits were based on limitations of Cloud
            #Servers.  We should revisit them in Nova.
            self.limiter = ratelimiting.Limiter(limits={
                    'DELETE': (100, ratelimiting.PER_MINUTE),
                    'PUT': (10, ratelimiting.PER_MINUTE),
                    'POST': (10, ratelimiting.PER_MINUTE),
                    'POST servers': (50, ratelimiting.PER_DAY),
                    'GET changes-since': (3, ratelimiting.PER_MINUTE),
                })
        else:
            self.limiter = ratelimiting.WSGIAppProxy(service_host)

    @webob.dec.wsgify
    def __call__(self, req):
        """Rate limit the request.

        If the request should be rate limited, return a 413 status with a
        Retry-After header giving the time when the request would succeed.
        """
        action_name = self.get_action_name(req)
        if not action_name:
            # Not rate limited
            return self.application
        delay = self.get_delay(action_name,
                               req.environ['nova.context'].user_id)
        if delay:
            # TODO(gundlach): Get the retry-after format correct.
            exc = webob.exc.HTTPRequestEntityTooLarge(
                    explanation='Too many requests.',
                    headers={'Retry-After': time.time() + delay})
            raise faults.Fault(exc)
        return self.application

    def get_delay(self, action_name, username):
        """Return the delay for the given action and username, or None if
        the action would not be rate limited.
        """
        if action_name == 'POST servers':
            # "POST servers" is a POST, so it counts against "POST" too.
            # Attempt the "POST" first, lest we are rate limited by "POST" but
            # use up a precious "POST servers" call.
            delay = self.limiter.perform("POST", username=username)
            if delay:
                return delay
        return self.limiter.perform(action_name, username=username)

    def get_action_name(self, req):
        """Return the action name for this request."""
        if req.method == 'GET' and 'changes-since' in req.GET:
            return 'GET changes-since'
        if req.method == 'POST' and req.path_info.startswith('/servers'):
            return 'POST servers'
        if req.method in ['PUT', 'POST', 'DELETE']:
            return req.method
        return None


class APIRouter(wsgi.Router):
    """
    Routes requests on the OpenStack API to the appropriate controller

@ -170,11 +85,22 @@ class APIRouter(wsgi.Router):

    def __init__(self):
        mapper = routes.Mapper()

        server_members = {'action': 'POST'}
        if FLAGS.allow_admin_api:
            logging.debug("Including admin operations in API.")
            server_members['pause'] = 'POST'
            server_members['unpause'] = 'POST'
            server_members["diagnostics"] = "GET"
            server_members["actions"] = "GET"
            server_members['suspend'] = 'POST'
            server_members['resume'] = 'POST'

        mapper.resource("server", "servers", controller=servers.Controller(),
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})
                        member=server_members)

        mapper.resource("backup_schedule", "backup_schedules",
        mapper.resource("backup_schedule", "backup_schedule",
                        controller=backup_schedules.Controller(),
                        parent_resource=dict(member_name='server',
                                             collection_name='servers'))

@ -186,27 +112,25 @@ class APIRouter(wsgi.Router):
        mapper.resource("sharedipgroup", "sharedipgroups",
                        controller=sharedipgroups.Controller())

        if FLAGS.allow_admin_api:
            logging.debug("Including admin operations in API.")
            # TODO: Place routes for admin operations here.

        super(APIRouter, self).__init__(mapper)


def limited(items, req):
    """Return a slice of items according to requested offset and limit.

    items - a sliceable
    req - webob.Request possibly containing offset and limit GET variables.
          offset is where to start in the list, and limit is the maximum number
          of items to return.

    If limit is not specified, 0, or > 1000, defaults to 1000.
    """
    offset = int(req.GET.get('offset', 0))
    limit = int(req.GET.get('limit', 0))
    if not limit:
        limit = 1000
    limit = min(1000, limit)
    range_end = offset + limit
    return items[offset:range_end]


class Versions(wsgi.Application):
    @webob.dec.wsgify
    def __call__(self, req):
        """Respond to a request for all OpenStack API versions."""
        response = {
                "versions": [
                    dict(status="CURRENT", id="v1.0")]}
        metadata = {
            "application/xml": {
                "attributes": dict(version=["status", "id"])}}
        return wsgi.Serializer(req.environ, metadata).to_content_type(response)


def router_factory(global_conf, **local_conf):
    return APIRouter()


def versions_factory(global_conf, **local_conf):
    return Versions()
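wsgi.Serializer itself is not shown in this diff, but given the response dict and the XML metadata above, the rendered version document would look roughly like this (the XML shape is inferred from the attribute metadata, not from serializer source):

import json

versions = {"versions": [dict(status="CURRENT", id="v1.0")]}
print json.dumps(versions)
# {"versions": [{"status": "CURRENT", "id": "v1.0"}]}
# the XML rendering would carry status/id as attributes, roughly:
# <versions><version status="CURRENT" id="v1.0"/></versions>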

@ -1,3 +1,20 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import hashlib
import json

@ -7,29 +24,46 @@ import webob.exc
import webob.dec

from nova import auth
from nova import context
from nova import db
from nova import flags
from nova import manager
from nova import utils
from nova import wsgi
from nova.api.openstack import faults

FLAGS = flags.FLAGS


class Context(object):
    pass
class AuthMiddleware(wsgi.Middleware):
    """Authorize the openstack API request or return an HTTP Forbidden."""


class BasicApiAuthManager(object):
    """ Implements a somewhat rudimentary version of OpenStack Auth"""

    def __init__(self, db_driver=None):
    def __init__(self, application, db_driver=None):
        if not db_driver:
            db_driver = FLAGS.db_driver
        self.db = utils.import_object(db_driver)
        self.auth = auth.manager.AuthManager()
        self.context = Context()
        super(BasicApiAuthManager, self).__init__()
        super(AuthMiddleware, self).__init__(application)

    @webob.dec.wsgify
    def __call__(self, req):
        if not self.has_authentication(req):
            return self.authenticate(req)

        user = self.get_user_by_authentication(req)

        if not user:
            return faults.Fault(webob.exc.HTTPUnauthorized())

        project = self.auth.get_project(FLAGS.default_project)
        req.environ['nova.context'] = context.RequestContext(user, project)
        return self.application

    def has_authentication(self, req):
        return 'X-Auth-Token' in req.headers

    def get_user_by_authentication(self, req):
        return self.authorize_token(req.headers["X-Auth-Token"])

    def authenticate(self, req):
        # Unless the request is explicitly made against /<version>/ don't

@ -68,11 +102,12 @@ class BasicApiAuthManager(object):
        This method will also remove the token if the timestamp is older than
        2 days ago.
        """
        token = self.db.auth_get_token(self.context, token_hash)
        ctxt = context.get_admin_context()
        token = self.db.auth_get_token(ctxt, token_hash)
        if token:
            delta = datetime.datetime.now() - token.created_at
            if delta.days >= 2:
                self.db.auth_destroy_token(self.context, token)
                self.db.auth_destroy_token(ctxt, token)
            else:
                return self.auth.get_user(token.user_id)
        return None

@ -84,6 +119,7 @@ class BasicApiAuthManager(object):
        key - string API key
        req - webob.Request object
        """
        ctxt = context.get_admin_context()
        user = self.auth.get_user_from_access_key(key)
        if user and user.name == username:
            token_hash = hashlib.sha1('%s%s%f' % (username, key,

@ -95,6 +131,12 @@ class BasicApiAuthManager(object):
            token_dict['server_management_url'] = req.url
            token_dict['storage_url'] = ''
            token_dict['user_id'] = user.id
            token = self.db.auth_create_token(self.context, token_dict)
            token = self.db.auth_create_token(ctxt, token_dict)
            return token, user
        return None, None


def auth_factory(global_conf, **local_conf):
    def auth(app):
        return AuthMiddleware(app)
    return auth
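The token flow above hashes username, API key, and a float into an opaque token and expires it after two days. A compressed sketch of that lifecycle; the %f component is truncated by the hunk and is assumed here to be time.time():

import datetime
import hashlib
import time

username, key = 'admin', 'secret'
token_hash = hashlib.sha1('%s%s%f' % (username, key, time.time())).hexdigest()

created_at = datetime.datetime.now() - datetime.timedelta(days=3)
delta = datetime.datetime.now() - created_at
print token_hash, (delta.days >= 2)  # True: this token would be destroyed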

@ -23,12 +23,25 @@ from nova.api.openstack import faults
import nova.image.service


def _translate_keys(inst):
    """ Coerces the backup schedule into proper dictionary format """
    return dict(backupSchedule=inst)


class Controller(wsgi.Controller):
    """ The backup schedule API controller for the Openstack API """

    _serialization_metadata = {
        'application/xml': {
            'attributes': {
                'backupSchedule': []}}}

    def __init__(self):
        pass

    def index(self, req, server_id):
        return faults.Fault(exc.HTTPNotFound())
        """ Returns the list of backup schedules for a given instance """
        return _translate_keys({})

    def create(self, req, server_id):
        """ No actual update method required, since the existing API allows

@ -36,4 +49,5 @@ class Controller(wsgi.Controller):
        return faults.Fault(exc.HTTPNotFound())

    def delete(self, req, server_id, id):
        """ Deletes an existing backup schedule """
        return faults.Fault(exc.HTTPNotFound())

36
nova/api/openstack/common.py
Normal file
@ -0,0 +1,36 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def limited(items, req):
    """Return a slice of items according to requested offset and limit.

    items - a sliceable
    req - webob.Request possibly containing offset and limit GET variables.
          offset is where to start in the list, and limit is the maximum number
          of items to return.

    If limit is not specified, 0, or > 1000, defaults to 1000.
    """

    offset = int(req.GET.get('offset', 0))
    limit = int(req.GET.get('limit', 0))
    if not limit:
        limit = 1000
    limit = min(1000, limit)
    range_end = offset + limit
    return items[offset:range_end]
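A quick illustration of limited()'s clamping behavior, using a stub in place of a real webob.Request (the stub is illustrative only; only the GET mapping is needed):

class StubRequest(object):
    """Stand-in for webob.Request exposing just the GET dict."""
    def __init__(self, **params):
        self.GET = params

items = range(50)
print len(limited(items, StubRequest()))                   # 50 (limit defaults to 1000)
print limited(items, StubRequest(offset='10', limit='5'))  # [10, 11, 12, 13, 14]
print len(limited(items, StubRequest(limit='5000')))       # 50 (limit clamped to 1000)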

@ -18,6 +18,7 @@
from webob import exc

from nova.api.openstack import faults
from nova.api.openstack import common
from nova.compute import instance_types
from nova import wsgi
import nova.api.openstack

@ -39,7 +40,7 @@ class Controller(wsgi.Controller):
    def detail(self, req):
        """Return all flavors in detail."""
        items = [self.show(req, id)['flavor'] for id in self._all_ids()]
        items = nova.api.openstack.limited(items, req)
        items = common.limited(items, req)
        return dict(flavors=items)

    def show(self, req, id):

@ -22,12 +22,73 @@ from nova import utils
from nova import wsgi
import nova.api.openstack
import nova.image.service
from nova.api.openstack import faults

from nova.api.openstack import common
from nova.api.openstack import faults
from nova.compute import api as compute_api

FLAGS = flags.FLAGS


def _translate_keys(item):
    """
    Maps key names to Rackspace-like attributes for return
    also pares down attributes to those we want
    item is a dict

    Note: should be removed when the set of keys expected by the api
    and the set of keys returned by the image service are equivalent

    """
    # TODO(tr3buchet): this map is specific to s3 object store,
    # replace with a list of keys for _filter_keys later
    mapped_keys = {'status': 'imageState',
                   'id': 'imageId',
                   'name': 'imageLocation'}

    mapped_item = {}
    # TODO(tr3buchet):
    # this chunk of code works with s3 and the local image service/glance
    # when we switch to glance/local image service it can be replaced with
    # a call to _filter_keys, and mapped_keys can be changed to a list
    try:
        for k, v in mapped_keys.iteritems():
            # map s3 fields
            mapped_item[k] = item[v]
    except KeyError:
        # return only the fields api expects
        mapped_item = _filter_keys(item, mapped_keys.keys())

    return mapped_item


def _translate_status(item):
    """
    Translates status of image to match current Rackspace api bindings
    item is a dict

    Note: should be removed when the set of statuses expected by the api
    and the set of statuses returned by the image service are equivalent

    """
    status_mapping = {
        'pending': 'queued',
        'decrypting': 'preparing',
        'untarring': 'saving',
        'available': 'active'}
    item['status'] = status_mapping[item['status']]
    return item


def _filter_keys(item, keys):
    """
    Filters all model attributes except for keys
    item is a dict

    """
    return dict((k, v) for k, v in item.iteritems() if k in keys)
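A small demonstration of the two translation paths above, one dict in s3 shape and one already in api shape (sample values invented for illustration):

s3_item = {'imageState': 'available', 'imageId': 'ami-42',
           'imageLocation': 'bucket/img'}
api_item = {'status': 'available', 'id': 'ami-42', 'name': 'img',
            'extra': 'dropped'}

print _translate_keys(s3_item)   # {'status': 'available', 'id': 'ami-42', 'name': 'bucket/img'}
print _translate_keys(api_item)  # KeyError path: filtered down to status/id/name
print _translate_status({'status': 'available'})  # {'status': 'active'}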


class Controller(wsgi.Controller):

    _serialization_metadata = {

@ -40,24 +101,25 @@ class Controller(wsgi.Controller):
        self._service = utils.import_object(FLAGS.image_service)

    def index(self, req):
        """Return all public images in brief."""
        return dict(images=[dict(id=img['id'], name=img['name'])
                            for img in self.detail(req)['images']])
        """Return all public images in brief"""
        items = self._service.index(req.environ['nova.context'])
        items = common.limited(items, req)
        items = [_filter_keys(item, ('id', 'name')) for item in items]
        return dict(images=items)

    def detail(self, req):
        """Return all public images in detail."""
        """Return all public images in detail"""
        try:
            images = self._service.detail(req.environ['nova.context'])
            images = nova.api.openstack.limited(images, req)
            items = self._service.detail(req.environ['nova.context'])
        except NotImplementedError:
            # Emulate detail() using repeated calls to show()
            images = self._service.index(ctxt)
            images = nova.api.openstack.limited(images, req)
            images = [self._service.show(ctxt, i['id']) for i in images]
        return dict(images=images)
            items = self._service.index(req.environ['nova.context'])
        items = common.limited(items, req)
        items = [_translate_keys(item) for item in items]
        items = [_translate_status(item) for item in items]
        return dict(images=items)

    def show(self, req, id):
        """Return data about the given image id."""
        """Return data about the given image id"""
        return dict(image=self._service.show(req.environ['nova.context'], id))

    def delete(self, req, id):

@ -65,9 +127,11 @@ class Controller(wsgi.Controller):
        raise faults.Fault(exc.HTTPNotFound())

    def create(self, req):
        # Only public images are supported for now, so a request to
        # make a backup of a server cannot be supported.
        raise faults.Fault(exc.HTTPNotFound())
        context = req.environ['nova.context']
        env = self._deserialize(req.body, req)
        instance_id = env["image"]["serverId"]
        name = env["image"]["name"]
        return compute_api.ComputeAPI().snapshot(context, instance_id, name)

    def update(self, req, id):
        # Users may not modify public images, and that's all that

@ -1,3 +1,20 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Rate limiting of arbitrary actions."""

import httplib

@ -6,6 +23,8 @@ import urllib
import webob.dec
import webob.exc

from nova import wsgi
from nova.api.openstack import faults

# Convenience constants for the limits dictionary passed to Limiter().
PER_SECOND = 1

@ -14,6 +33,83 @@ PER_HOUR = 60 * 60
PER_DAY = 60 * 60 * 24


class RateLimitingMiddleware(wsgi.Middleware):
    """Rate limit incoming requests according to the OpenStack rate limits."""

    def __init__(self, application, service_host=None):
        """Create a rate limiting middleware that wraps the given application.

        By default, rate counters are stored in memory.  If service_host is
        specified, the middleware instead relies on the ratelimiting.WSGIApp
        at the given host+port to keep rate counters.
        """
        if not service_host:
            #TODO(gundlach): These limits were based on limitations of Cloud
            #Servers.  We should revisit them in Nova.
            self.limiter = Limiter(limits={
                    'DELETE': (100, PER_MINUTE),
                    'PUT': (10, PER_MINUTE),
                    'POST': (10, PER_MINUTE),
                    'POST servers': (50, PER_DAY),
                    'GET changes-since': (3, PER_MINUTE),
                })
        else:
            self.limiter = WSGIAppProxy(service_host)
        super(RateLimitingMiddleware, self).__init__(application)

    @webob.dec.wsgify
    def __call__(self, req):
        """Rate limit the request.

        If the request should be rate limited, return a 413 status with a
        Retry-After header giving the time when the request would succeed.
        """
        return self.rate_limited_request(req, self.application)

    def rate_limited_request(self, req, application):
        """Rate limit the request.

        If the request should be rate limited, return a 413 status with a
        Retry-After header giving the time when the request would succeed.
        """
        action_name = self.get_action_name(req)
        if not action_name:
            # Not rate limited
            return application
        delay = self.get_delay(action_name,
                               req.environ['nova.context'].user_id)
        if delay:
            # TODO(gundlach): Get the retry-after format correct.
            exc = webob.exc.HTTPRequestEntityTooLarge(
                    explanation=('Too many requests.'),
                    headers={'Retry-After': time.time() + delay})
            raise faults.Fault(exc)
        return application

    def get_delay(self, action_name, username):
        """Return the delay for the given action and username, or None if
        the action would not be rate limited.
        """
        if action_name == 'POST servers':
            # "POST servers" is a POST, so it counts against "POST" too.
            # Attempt the "POST" first, lest we are rate limited by "POST" but
            # use up a precious "POST servers" call.
            delay = self.limiter.perform("POST", username=username)
            if delay:
                return delay
        return self.limiter.perform(action_name, username=username)

    def get_action_name(self, req):
        """Return the action name for this request."""
        if req.method == 'GET' and 'changes-since' in req.GET:
            return 'GET changes-since'
        if req.method == 'POST' and req.path_info.startswith('/servers'):
            return 'POST servers'
        if req.method in ['PUT', 'POST', 'DELETE']:
            return req.method
        return None
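Limiter.perform is the whole contract the middleware relies on: it returns None when the action may proceed, or a delay in seconds when it is over the limit. A usage sketch against the in-memory Limiter above (the Limiter internals are elided by this hunk, so the return values here follow the docstrings rather than the implementation):

limiter = Limiter(limits={'POST': (10, PER_MINUTE)})

for i in range(12):
    delay = limiter.perform('POST', username='alice')
    if delay:
        # eleventh call and beyond: told how long to wait before retrying
        print 'rate limited, retry in %s seconds' % delay
    else:
        print 'request %d allowed' % i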


class Limiter(object):

    """Class providing rate limiting of arbitrary actions."""

@ -123,3 +219,9 @@ class WSGIAppProxy(object):
            # No delay
            return None
        return float(resp.getheader('X-Wait-Seconds'))


def ratelimit_factory(global_conf, **local_conf):
    def rl(app):
        return RateLimitingMiddleware(app)
    return rl

@ -15,10 +15,14 @@
# License for the specific language governing permissions and limitations
# under the License.

import logging
import traceback

from webob import exc

from nova import exception
from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.auth import manager as auth_manager
from nova.compute import api as compute_api

@ -27,18 +31,20 @@ from nova.compute import power_state
import nova.api.openstack


def _entity_list(entities):
    """ Coerces a list of servers into proper dictionary format """
    return dict(servers=entities)
LOG = logging.getLogger('server')
LOG.setLevel(logging.DEBUG)


def _entity_detail(inst):
    """ Maps everything to Rackspace-like attributes for return"""
def _translate_detail_keys(inst):
    """ Coerces into dictionary format, mapping everything to Rackspace-like
    attributes for return"""
    power_mapping = {
        None: 'build',
        power_state.NOSTATE: 'build',
        power_state.RUNNING: 'active',
        power_state.BLOCKED: 'active',
        power_state.PAUSED: 'suspended',
        power_state.SUSPENDED: 'suspended',
        power_state.PAUSED: 'error',
        power_state.SHUTDOWN: 'active',
        power_state.SHUTOFF: 'active',
        power_state.CRASHED: 'error'}

@ -58,8 +64,9 @@ def _translate_detail_keys(inst):
    return dict(server=inst_dict)


def _entity_inst(inst):
    """ Filters all model attributes save for id and name """
def _translate_keys(inst):
    """ Coerces into dictionary format, excluding all model attributes
    save for id and name """
    return dict(server=dict(id=inst['internal_id'], name=inst['display_name']))


@ -78,29 +85,29 @@ class Controller(wsgi.Controller):

    def index(self, req):
        """ Returns a list of server names and ids for a given user """
        return self._items(req, entity_maker=_entity_inst)
        return self._items(req, entity_maker=_translate_keys)

    def detail(self, req):
        """ Returns a list of server details for a given user """
        return self._items(req, entity_maker=_entity_detail)
        return self._items(req, entity_maker=_translate_detail_keys)

    def _items(self, req, entity_maker):
        """Returns a list of servers for a given user.

        entity_maker - either _entity_detail or _entity_inst
        entity_maker - either _translate_detail_keys or _translate_keys
        """
        instance_list = self.compute_api.get_instances(
            req.environ['nova.context'])
        limited_list = nova.api.openstack.limited(instance_list, req)
        limited_list = common.limited(instance_list, req)
        res = [entity_maker(inst)['server'] for inst in limited_list]
        return _entity_list(res)
        return dict(servers=res)

    def show(self, req, id):
        """ Returns server details by server id """
        try:
            instance = self.compute_api.get_instance(
                req.environ['nova.context'], int(id))
            return _entity_detail(instance)
            return _translate_detail_keys(instance)
        except exception.NotFound:
            return faults.Fault(exc.HTTPNotFound())

@ -129,7 +136,7 @@ class Controller(wsgi.Controller):
                description=env['server']['name'],
                key_name=key_pair['name'],
                key_data=key_pair['public_key'])
        return _entity_inst(instances[0])
        return _translate_keys(instances[0])

    def update(self, req, id):
        """ Updates the server name or password """

@ -144,8 +151,9 @@ class Controller(wsgi.Controller):
            update_dict['display_name'] = inst_dict['server']['name']

        try:
            self.compute_api.update_instance(req.environ['nova.context'],
                                             instance['id'],
            ctxt = req.environ['nova.context']
            self.compute_api.update_instance(ctxt,
                                             id,
                                             **update_dict)
        except exception.NotFound:
            return faults.Fault(exc.HTTPNotFound())

@ -166,3 +174,57 @@ class Controller(wsgi.Controller):
        except:
            return faults.Fault(exc.HTTPUnprocessableEntity())
        return exc.HTTPAccepted()

    def pause(self, req, id):
        """ Permit Admins to Pause the server. """
        ctxt = req.environ['nova.context']
        try:
            self.compute_api.pause(ctxt, id)
        except:
            readable = traceback.format_exc()
            logging.error(_("Compute.api::pause %s"), readable)
            return faults.Fault(exc.HTTPUnprocessableEntity())
        return exc.HTTPAccepted()

    def unpause(self, req, id):
        """ Permit Admins to Unpause the server. """
        ctxt = req.environ['nova.context']
        try:
            self.compute_api.unpause(ctxt, id)
        except:
            readable = traceback.format_exc()
            logging.error(_("Compute.api::unpause %s"), readable)
            return faults.Fault(exc.HTTPUnprocessableEntity())
        return exc.HTTPAccepted()

    def suspend(self, req, id):
        """permit admins to suspend the server"""
        context = req.environ['nova.context']
        try:
            self.compute_api.suspend(context, id)
        except:
            readable = traceback.format_exc()
            logging.error(_("compute.api::suspend %s"), readable)
            return faults.Fault(exc.HTTPUnprocessableEntity())
        return exc.HTTPAccepted()

    def resume(self, req, id):
        """permit admins to resume the server from suspend"""
        context = req.environ['nova.context']
        try:
            self.compute_api.resume(context, id)
        except:
            readable = traceback.format_exc()
            logging.error(_("compute.api::resume %s"), readable)
            return faults.Fault(exc.HTTPUnprocessableEntity())
        return exc.HTTPAccepted()

    def diagnostics(self, req, id):
        """Permit Admins to retrieve server diagnostics."""
        ctxt = req.environ["nova.context"]
        return self.compute_api.get_diagnostics(ctxt, id)

    def actions(self, req, id):
        """Permit Admins to retrieve server actions."""
        ctxt = req.environ["nova.context"]
        return self.compute_api.get_actions(ctxt, id)

@ -15,8 +15,51 @@
# License for the specific language governing permissions and limitations
# under the License.

from webob import exc

from nova import wsgi
from nova.api.openstack import faults


def _translate_keys(inst):
    """ Coerces a shared IP group instance into proper dictionary format """
    return dict(sharedIpGroup=inst)


def _translate_detail_keys(inst):
    """ Coerces a shared IP group instance into proper dictionary format with
    correctly mapped attributes """
    return dict(sharedIpGroup=inst)


class Controller(wsgi.Controller):
    pass
    """ The Shared IP Groups Controller for the Openstack API """

    _serialization_metadata = {
        'application/xml': {
            'attributes': {
                'sharedIpGroup': []}}}

    def index(self, req):
        """ Returns a list of Shared IP Groups for the user """
        return dict(sharedIpGroups=[])

    def show(self, req, id):
        """ Shows in-depth information on a specific Shared IP Group """
        return _translate_keys({})

    def update(self, req, id):
        """ You can't update a Shared IP Group """
        raise faults.Fault(exc.HTTPNotImplemented())

    def delete(self, req, id):
        """ Deletes a Shared IP Group """
        raise faults.Fault(exc.HTTPNotFound())

    def detail(self, req, id):
        """ Returns a complete list of Shared IP Groups """
        return _translate_detail_keys({})

    def create(self, req):
        """ Creates a new Shared IP group """
        raise faults.Fault(exc.HTTPNotFound())

@ -37,7 +37,6 @@ class DbDriver(object):
    def __init__(self):
        """Imports the LDAP module"""
        pass
        db

    def __enter__(self):
        return self

@ -83,7 +82,7 @@ class DbDriver(object):
            user_ref = db.user_create(context.get_admin_context(), values)
            return self._db_user_to_auth_user(user_ref)
        except exception.Duplicate, e:
            raise exception.Duplicate('User %s already exists' % name)
            raise exception.Duplicate(_('User %s already exists') % name)

    def _db_user_to_auth_user(self, user_ref):
        return {'id': user_ref['id'],

@ -105,8 +104,9 @@ class DbDriver(object):
        """Create a project"""
        manager = db.user_get(context.get_admin_context(), manager_uid)
        if not manager:
            raise exception.NotFound("Project can't be created because "
                                     "manager %s doesn't exist" % manager_uid)
            raise exception.NotFound(_("Project can't be created because "
                                       "manager %s doesn't exist")
                                     % manager_uid)

        # description is a required attribute
        if description is None:

@ -133,8 +133,8 @@ class DbDriver(object):
        try:
            project = db.project_create(context.get_admin_context(), values)
        except exception.Duplicate:
            raise exception.Duplicate("Project can't be created because "
                                      "project %s already exists" % name)
            raise exception.Duplicate(_("Project can't be created because "
                                        "project %s already exists") % name)

        for member in members:
            db.project_add_member(context.get_admin_context(),

@ -155,8 +155,8 @@ class DbDriver(object):
        if manager_uid:
            manager = db.user_get(context.get_admin_context(), manager_uid)
            if not manager:
                raise exception.NotFound("Project can't be modified because "
                                         "manager %s doesn't exist" %
                raise exception.NotFound(_("Project can't be modified because "
                                           "manager %s doesn't exist") %
                                         manager_uid)
            values['project_manager'] = manager['id']
        if description:

@ -243,8 +243,8 @@ class DbDriver(object):
    def _validate_user_and_project(self, user_id, project_id):
        user = db.user_get(context.get_admin_context(), user_id)
        if not user:
            raise exception.NotFound('User "%s" not found' % user_id)
            raise exception.NotFound(_('User "%s" not found') % user_id)
        project = db.project_get(context.get_admin_context(), project_id)
        if not project:
            raise exception.NotFound('Project "%s" not found' % project_id)
            raise exception.NotFound(_('Project "%s" not found') % project_id)
        return user, project

@ -15,7 +15,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake LDAP server for test harness, backs to ReDIS.
"""Fake LDAP server for test harness.

This class does very little error checking, and knows nothing about ldap
class definitions. It implements the minimum emulation of the python ldap

@ -23,34 +23,65 @@ library to work with nova.

"""

import fnmatch
import json
import redis

from nova import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('redis_host', '127.0.0.1',
                    'Host that redis is running on.')
flags.DEFINE_integer('redis_port', 6379,
                     'Port that redis is running on.')
flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away')


class Redis(object):
class Store(object):
    def __init__(self):
        if hasattr(self.__class__, '_instance'):
            raise Exception('Attempted to instantiate singleton')
            raise Exception(_('Attempted to instantiate singleton'))

    @classmethod
    def instance(cls):
        if not hasattr(cls, '_instance'):
            inst = redis.Redis(host=FLAGS.redis_host,
                               port=FLAGS.redis_port,
                               db=FLAGS.redis_db)
            cls._instance = inst
            cls._instance = _StorageDict()
        return cls._instance


class _StorageDict(dict):
    def keys(self, pat=None):
        ret = super(_StorageDict, self).keys()
        if pat is not None:
            ret = fnmatch.filter(ret, pat)
        return ret

    def delete(self, key):
        try:
            del self[key]
        except KeyError:
            pass

    def flushdb(self):
        self.clear()

    def hgetall(self, key):
        """Returns the hash for the given key; creates
        the hash if the key doesn't exist."""
        try:
            return self[key]
        except KeyError:
            self[key] = {}
            return self[key]

    def hget(self, key, field):
        hashdict = self.hgetall(key)
        try:
            return hashdict[field]
        except KeyError:
            hashdict[field] = {}
            return hashdict[field]

    def hset(self, key, field, val):
        hashdict = self.hgetall(key)
        hashdict[field] = val

    def hmset(self, key, value_dict):
        hashdict = self.hgetall(key)
        for field, val in value_dict.items():
            hashdict[field] = val
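The _StorageDict above keeps just enough of the redis hash API (hset/hget/hgetall/hmset, glob-style keys, delete, flushdb) for FakeLDAP to run without a redis server. A quick exercise of that surface:

store = _StorageDict()
store.hmset('ldap:cn=fake', {'cn': '["fake"]', 'objectclass': '["novaUser"]'})
store.hset('ldap:cn=fake', 'uid', '["fake"]')

print store.hget('ldap:cn=fake', 'uid')  # ["fake"]
print store.keys('ldap:*')               # ['ldap:cn=fake']
store.delete('ldap:cn=fake')
print store.keys('ldap:*')               # []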


SCOPE_BASE = 0
SCOPE_ONELEVEL = 1  # Not implemented
SCOPE_SUBTREE = 2

@ -119,6 +150,9 @@ def _match(key, value, attrs):
    """Match a given key and value against an attribute list."""
    if key not in attrs:
        return False
    # This is a wild card search. Implemented as all or nothing for now.
    if value == "*":
        return True
    if key != "objectclass":
        return value in attrs[key]
    # it is an objectclass check, so check subclasses

@ -169,8 +203,6 @@ def _to_json(unencoded):


class FakeLDAP(object):
    #TODO(vish): refactor this class to use a wrapper instead of accessing
    #            redis directly
    """Fake LDAP connection."""

    def simple_bind_s(self, dn, password):

@ -183,14 +215,13 @@ class FakeLDAP(object):

    def add_s(self, dn, attr):
        """Add an object with the specified attributes at dn."""
        key = "%s%s" % (self.__redis_prefix, dn)

        key = "%s%s" % (self.__prefix, dn)
        value_dict = dict([(k, _to_json(v)) for k, v in attr])
        Redis.instance().hmset(key, value_dict)
        Store.instance().hmset(key, value_dict)

    def delete_s(self, dn):
        """Remove the ldap object at specified dn."""
        Redis.instance().delete("%s%s" % (self.__redis_prefix, dn))
        Store.instance().delete("%s%s" % (self.__prefix, dn))

    def modify_s(self, dn, attrs):
        """Modify the object at dn using the attribute list.

@ -201,18 +232,18 @@ class FakeLDAP(object):
            ([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)

        """
        redis = Redis.instance()
        key = "%s%s" % (self.__redis_prefix, dn)
        store = Store.instance()
        key = "%s%s" % (self.__prefix, dn)

        for cmd, k, v in attrs:
            values = _from_json(redis.hget(key, k))
            values = _from_json(store.hget(key, k))
            if cmd == MOD_ADD:
                values.append(v)
            elif cmd == MOD_REPLACE:
                values = [v]
            else:
                values.remove(v)
            values = redis.hset(key, k, _to_json(values))
            values = store.hset(key, k, _to_json(values))

    def search_s(self, dn, scope, query=None, fields=None):
        """Search for all matching objects under dn using the query.

@ -226,16 +257,17 @@ class FakeLDAP(object):
        """
        if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
            raise NotImplementedError(str(scope))
        redis = Redis.instance()
        store = Store.instance()
        if scope == SCOPE_BASE:
            keys = ["%s%s" % (self.__redis_prefix, dn)]
            keys = ["%s%s" % (self.__prefix, dn)]
        else:
            keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
            keys = store.keys("%s*%s" % (self.__prefix, dn))

        objects = []
        for key in keys:
            # get the attributes from redis
            attrs = redis.hgetall(key)
            # turn the values from redis into lists
            # get the attributes from the store
            attrs = store.hgetall(key)
            # turn the values from the store into lists
            # pylint: disable-msg=E1103
            attrs = dict([(k, _from_json(v))
                          for k, v in attrs.iteritems()])

@ -244,13 +276,13 @@ class FakeLDAP(object):
            # filter the attributes by fields
            attrs = dict([(k, v) for k, v in attrs.iteritems()
                          if not fields or k in fields])
            objects.append((key[len(self.__redis_prefix):], attrs))
            objects.append((key[len(self.__prefix):], attrs))
            # pylint: enable-msg=E1103
        if objects == []:
            raise NO_SUCH_OBJECT()
        return objects

    @property
    def __redis_prefix(self):  # pylint: disable-msg=R0201
        """Get the prefix to use for all redis keys."""
    def __prefix(self):  # pylint: disable-msg=R0201
        """Get the prefix to use for all keys."""
        return 'ldap:'
|
||||
|
@ -32,11 +32,16 @@ from nova import flags


FLAGS = flags.FLAGS
flags.DEFINE_integer('ldap_schema_version', 2,
'Current version of the LDAP schema')
flags.DEFINE_string('ldap_url', 'ldap://localhost',
'Point this at your ldap server')
flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
'DN of admin user')
flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id')
flags.DEFINE_string('ldap_user_name_attribute', 'cn',
'Attribute to use as name')
flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
'OU for Users')
@ -73,10 +78,20 @@ class LdapDriver(object):
Defines enter and exit and therefore supports the with/as syntax.
"""

project_pattern = '(owner=*)'
isadmin_attribute = 'isNovaAdmin'
project_attribute = 'owner'
project_objectclass = 'groupOfNames'

def __init__(self):
"""Imports the LDAP module"""
self.ldap = __import__('ldap')
self.conn = None
if FLAGS.ldap_schema_version == 1:
LdapDriver.project_pattern = '(objectclass=novaProject)'
LdapDriver.isadmin_attribute = 'isAdmin'
LdapDriver.project_attribute = 'projectManager'
LdapDriver.project_objectclass = 'novaProject'

def __enter__(self):
"""Creates the connection to LDAP"""
@ -104,13 +119,13 @@ class LdapDriver(object):
"""Retrieve project by id"""
dn = 'cn=%s,%s' % (pid,
FLAGS.ldap_project_subtree)
attr = self.__find_object(dn, '(objectclass=novaProject)')
attr = self.__find_object(dn, LdapDriver.project_pattern)
return self.__to_project(attr)

def get_users(self):
"""Retrieve list of users"""
attrs = self.__find_objects(FLAGS.ldap_user_subtree,
'(objectclass=novaUser)')
'(objectclass=novaUser)')
users = []
for attr in attrs:
user = self.__to_user(attr)
@ -120,7 +135,7 @@ class LdapDriver(object):

def get_projects(self, uid=None):
"""Retrieve list of projects"""
pattern = '(objectclass=novaProject)'
pattern = LdapDriver.project_pattern
if uid:
pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid))
attrs = self.__find_objects(FLAGS.ldap_project_subtree,
@ -139,27 +154,29 @@ class LdapDriver(object):
# Malformed entries are useless, replace attributes found.
attr = []
if 'secretKey' in user.keys():
attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
[secret_key]))
attr.append((self.ldap.MOD_REPLACE, 'secretKey',
[secret_key]))
else:
attr.append((self.ldap.MOD_ADD, 'secretKey', \
[secret_key]))
attr.append((self.ldap.MOD_ADD, 'secretKey',
[secret_key]))
if 'accessKey' in user.keys():
attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
[access_key]))
attr.append((self.ldap.MOD_REPLACE, 'accessKey',
[access_key]))
else:
attr.append((self.ldap.MOD_ADD, 'accessKey', \
[access_key]))
if 'isAdmin' in user.keys():
attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
[str(is_admin).upper()]))
attr.append((self.ldap.MOD_ADD, 'accessKey',
[access_key]))
if LdapDriver.isadmin_attribute in user.keys():
attr.append((self.ldap.MOD_REPLACE,
LdapDriver.isadmin_attribute,
[str(is_admin).upper()]))
else:
attr.append((self.ldap.MOD_ADD, 'isAdmin', \
[str(is_admin).upper()]))
attr.append((self.ldap.MOD_ADD,
LdapDriver.isadmin_attribute,
[str(is_admin).upper()]))
self.conn.modify_s(self.__uid_to_dn(name), attr)
return self.get_user(name)
else:
raise exception.NotFound("LDAP object for %s doesn't exist"
raise exception.NotFound(_("LDAP object for %s doesn't exist")
% name)
else:
attr = [
@ -168,12 +185,12 @@ class LdapDriver(object):
'inetOrgPerson',
'novaUser']),
('ou', [FLAGS.ldap_user_unit]),
('uid', [name]),
(FLAGS.ldap_user_id_attribute, [name]),
('sn', [name]),
('cn', [name]),
(FLAGS.ldap_user_name_attribute, [name]),
('secretKey', [secret_key]),
('accessKey', [access_key]),
('isAdmin', [str(is_admin).upper()]),
(LdapDriver.isadmin_attribute, [str(is_admin).upper()]),
]
self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr))
@ -182,11 +199,12 @@ class LdapDriver(object):
description=None, member_uids=None):
"""Create a project"""
if self.__project_exists(name):
raise exception.Duplicate("Project can't be created because "
"project %s already exists" % name)
raise exception.Duplicate(_("Project can't be created because "
"project %s already exists") % name)
if not self.__user_exists(manager_uid):
raise exception.NotFound("Project can't be created because "
"manager %s doesn't exist" % manager_uid)
raise exception.NotFound(_("Project can't be created because "
"manager %s doesn't exist")
% manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
# description is a required attribute
if description is None:
@ -195,18 +213,18 @@ class LdapDriver(object):
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Project can't be created "
"because user %s doesn't exist"
raise exception.NotFound(_("Project can't be created "
"because user %s doesn't exist")
% member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
members.append(manager_dn)
attr = [
('objectclass', ['novaProject']),
('objectclass', [LdapDriver.project_objectclass]),
('cn', [name]),
('description', [description]),
('projectManager', [manager_dn]),
(LdapDriver.project_attribute, [manager_dn]),
('member', members)]
self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
return self.__to_project(dict(attr))
@ -218,11 +236,12 @@ class LdapDriver(object):
attr = []
if manager_uid:
if not self.__user_exists(manager_uid):
raise exception.NotFound("Project can't be modified because "
"manager %s doesn't exist" %
manager_uid)
raise exception.NotFound(_("Project can't be modified because "
"manager %s doesn't exist")
% manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn))
attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute,
manager_dn))
if description:
attr.append((self.ldap.MOD_REPLACE, 'description', description))
self.conn.modify_s('cn=%s,%s' % (project_id,
@ -282,10 +301,9 @@ class LdapDriver(object):
return roles
else:
project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
roles = self.__find_objects(project_dn,
'(&(&(objectclass=groupOfNames)'
'(!(objectclass=novaProject)))'
'(member=%s))' % self.__uid_to_dn(uid))
query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
(LdapDriver.project_pattern, self.__uid_to_dn(uid)))
roles = self.__find_objects(project_dn, query)
return [role['cn'][0] for role in roles]

def delete_user(self, uid):
@ -299,14 +317,15 @@ class LdapDriver(object):
# Retrieve user by name
user = self.__get_ldap_user(uid)
if 'secretKey' in user.keys():
attr.append((self.ldap.MOD_DELETE, 'secretKey', \
user['secretKey']))
attr.append((self.ldap.MOD_DELETE, 'secretKey',
user['secretKey']))
if 'accessKey' in user.keys():
attr.append((self.ldap.MOD_DELETE, 'accessKey', \
user['accessKey']))
if 'isAdmin' in user.keys():
attr.append((self.ldap.MOD_DELETE, 'isAdmin', \
user['isAdmin']))
attr.append((self.ldap.MOD_DELETE, 'accessKey',
user['accessKey']))
if LdapDriver.isadmin_attribute in user.keys():
attr.append((self.ldap.MOD_DELETE,
LdapDriver.isadmin_attribute,
user[LdapDriver.isadmin_attribute]))
self.conn.modify_s(self.__uid_to_dn(uid), attr)
else:
# Delete entry
@ -328,7 +347,8 @@ class LdapDriver(object):
if secret_key:
attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key))
if admin is not None:
attr.append((self.ldap.MOD_REPLACE, 'isAdmin', str(admin).upper()))
attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute,
str(admin).upper()))
self.conn.modify_s(self.__uid_to_dn(uid), attr)

def __user_exists(self, uid):
@ -346,7 +366,7 @@ class LdapDriver(object):
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
attr = self.__find_object(self.__uid_to_dn(uid),
'(objectclass=novaUser)')
'(objectclass=novaUser)')
return attr

def __find_object(self, dn, query=None, scope=None):
@ -382,19 +402,21 @@ class LdapDriver(object):

def __find_role_dns(self, tree):
"""Find dns of role objects in given tree"""
return self.__find_dns(tree,
'(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
query = ('(&(objectclass=groupOfNames)(!%s))' %
LdapDriver.project_pattern)
return self.__find_dns(tree, query)

def __find_group_dns_with_member(self, tree, uid):
"""Find dns of group objects in a given tree that contain member"""
dns = self.__find_dns(tree,
'(&(objectclass=groupOfNames)(member=%s))' %
self.__uid_to_dn(uid))
query = ('(&(objectclass=groupOfNames)(member=%s))' %
self.__uid_to_dn(uid))
dns = self.__find_dns(tree, query)
return dns

def __group_exists(self, dn):
"""Check if group exists"""
return self.__find_object(dn, '(objectclass=groupOfNames)') is not None
query = '(objectclass=groupOfNames)'
return self.__find_object(dn, query) is not None

@staticmethod
def __role_to_dn(role, project_id=None):
@ -417,7 +439,8 @@ class LdapDriver(object):
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Group can't be created "
"because user %s doesn't exist" % member_uid)
"because user %s doesn't exist" %
member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
@ -433,7 +456,7 @@ class LdapDriver(object):
"""Check if user is in group"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be searched in group "
"becuase the user doesn't exist" % (uid,))
"because the user doesn't exist" % uid)
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
@ -445,13 +468,13 @@ class LdapDriver(object):
"""Add user to group"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be added to the group "
"becuase the user doesn't exist" % (uid,))
"because the user doesn't exist" % uid)
if not self.__group_exists(group_dn):
raise exception.NotFound("The group at dn %s doesn't exist" %
(group_dn,))
group_dn)
if self.__is_in_group(uid, group_dn):
raise exception.Duplicate("User %s is already a member of "
"the group %s" % (uid, group_dn))
raise exception.Duplicate(_("User %s is already a member of "
"the group %s") % (uid, group_dn))
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)

@ -459,16 +482,16 @@ class LdapDriver(object):
"""Remove user from group"""
if not self.__group_exists(group_dn):
raise exception.NotFound("The group at dn %s doesn't exist" %
(group_dn,))
group_dn)
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be removed from the "
"group because the user doesn't exist" % (uid,))
"group because the user doesn't exist" %
uid)
if not self.__is_in_group(uid, group_dn):
raise exception.NotFound("User %s is not a member of the group" %
(uid,))
uid)
# NOTE(vish): remove user from group and any sub_groups
sub_dns = self.__find_group_dns_with_member(
group_dn, uid)
sub_dns = self.__find_group_dns_with_member(group_dn, uid)
for sub_dn in sub_dns:
self.__safe_remove_from_group(uid, sub_dn)

@ -479,15 +502,15 @@ class LdapDriver(object):
try:
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
logging.debug("Attempted to remove the last member of a group. "
"Deleting the group at %s instead.", group_dn)
logging.debug(_("Attempted to remove the last member of a group. "
"Deleting the group at %s instead."), group_dn)
self.__delete_group(group_dn)

def __remove_from_all(self, uid):
"""Remove user from all roles and projects"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be removed from all "
"because the user doesn't exist" % (uid,))
"because the user doesn't exist" % uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
@ -500,7 +523,8 @@ class LdapDriver(object):
def __delete_group(self, group_dn):
"""Delete Group"""
if not self.__group_exists(group_dn):
raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
raise exception.NotFound(_("Group at dn %s doesn't exist")
% group_dn)
self.conn.delete_s(group_dn)

def __delete_roles(self, project_dn):
@ -514,13 +538,13 @@ class LdapDriver(object):
if attr is None:
return None
if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \
and 'isAdmin' in attr.keys()):
and LdapDriver.isadmin_attribute in attr.keys()):
return {
'id': attr['uid'][0],
'name': attr['cn'][0],
'id': attr[FLAGS.ldap_user_id_attribute][0],
'name': attr[FLAGS.ldap_user_name_attribute][0],
'access': attr['accessKey'][0],
'secret': attr['secretKey'][0],
'admin': (attr['isAdmin'][0] == 'TRUE')}
'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')}
else:
return None

@ -532,7 +556,8 @@ class LdapDriver(object):
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]),
'project_manager_id':
self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
'description': attr.get('description', [None])[0],
'member_ids': [self.__dn_to_uid(x) for x in member_dns]}

@ -542,9 +567,10 @@ class LdapDriver(object):
return dn.split(',')[0].split('=')[1]

@staticmethod
def __uid_to_dn(dn):
def __uid_to_dn(uid):
"""Convert uid to dn"""
return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree)
return (FLAGS.ldap_user_id_attribute + '=%s,%s'
% (uid, FLAGS.ldap_user_subtree))


class FakeLdapDriver(LdapDriver):
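The driver changes above replace hard-coded LDAP filters with class attributes so that a single code path serves both schema versions. A sketch of how the role filter is derived from project_pattern (illustrative; the filter strings are copied from the hunks, the helper class is an assumption):

class LdapDriverFilters(object):
    project_pattern = '(owner=*)'  # schema version 2 default

    @classmethod
    def use_schema_v1(cls):
        cls.project_pattern = '(objectclass=novaProject)'

    @classmethod
    def role_filter(cls):
        # roles are groupOfNames entries that are not projects
        return '(&(objectclass=groupOfNames)(!%s))' % cls.project_pattern


assert LdapDriverFilters.role_filter() == \
    '(&(objectclass=groupOfNames)(!(owner=*)))'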
@ -64,12 +64,9 @@ flags.DEFINE_string('credential_key_file', 'pk.pem',
'Filename of private key in credentials zip')
flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
flags.DEFINE_string('credential_rc_file', '%src',
'Filename of rc in credentials zip, %s will be '
'replaced by name of the region (nova by default)')
flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')

@ -257,12 +254,12 @@ class AuthManager(object):
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')

logging.info('Looking up user: %r', access_key)
logging.info(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
logging.info('user: %r', user)
if user == None:
raise exception.NotFound('No user found for access key %s' %
access_key)
raise exception.NotFound(_('No user found for access key %s')
% access_key)

# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
@ -271,12 +268,12 @@ class AuthManager(object):

project = self.get_project(project_id)
if project == None:
raise exception.NotFound('No project called %s could be found' %
project_id)
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
raise exception.NotFound('User %s is not a member of project %s' %
(user.id, project.id))
raise exception.NotFound(_('User %s is not a member of project %s')
% (user.id, project.id))
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
@ -284,7 +281,7 @@ class AuthManager(object):
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
if signature != expected_signature:
raise exception.NotAuthorized('Signature does not match')
raise exception.NotAuthorized(_('Signature does not match'))
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
@ -294,7 +291,7 @@ class AuthManager(object):
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
if signature != expected_signature:
raise exception.NotAuthorized('Signature does not match')
raise exception.NotAuthorized(_('Signature does not match'))
return (user, project)

def get_access_key(self, user, project):
@ -364,7 +361,7 @@ class AuthManager(object):
with self.driver() as drv:
if role == 'projectmanager':
if not project:
raise exception.Error("Must specify project")
raise exception.Error(_("Must specify project"))
return self.is_project_manager(user, project)

global_role = drv.has_role(User.safe_id(user),
@ -398,9 +395,9 @@ class AuthManager(object):
@param project: Project in which to add local role.
"""
if role not in FLAGS.allowed_roles:
raise exception.NotFound("The %s role can not be found" % role)
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound("The %s role is global only" % role)
raise exception.NotFound(_("The %s role is global only") % role)
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))

@ -543,10 +540,10 @@ class AuthManager(object):
"""

network_ref = db.project_get_network(context.get_admin_context(),
Project.safe_id(project))
Project.safe_id(project), False)

if not network_ref['vpn_public_port']:
raise exception.NotFound('project network data has not been set')
if not network_ref:
return (None, None)
return (network_ref['vpn_public_address'],
network_ref['vpn_public_port'])

@ -628,27 +625,37 @@ class AuthManager(object):
def get_key_pairs(context):
return db.key_pair_get_all_by_user(context.elevated(), context.user_id)

def get_credentials(self, user, project=None):
def get_credentials(self, user, project=None, use_dmz=True):
"""Get credential zip for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
project = user.id
pid = Project.safe_id(project)
rc = self.__generate_rc(user.access, user.secret, pid)
private_key, signed_cert = self._generate_x509_cert(user.id, pid)
private_key, signed_cert = crypto.generate_x509_cert(user.id, pid)

tmpdir = tempfile.mkdtemp()
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
zippy.writestr(FLAGS.credential_rc_file, rc)
if use_dmz and FLAGS.region_list:
regions = {}
for item in FLAGS.region_list:
region, _sep, region_host = item.partition("=")
regions[region] = region_host
else:
regions = {'nova': FLAGS.cc_host}
for region, host in regions.iteritems():
rc = self.__generate_rc(user.access,
user.secret,
pid,
use_dmz,
host)
zippy.writestr(FLAGS.credential_rc_file % region, rc)

zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)

try:
(vpn_ip, vpn_port) = self.get_project_vpn_data(project)
except exception.NotFound:
vpn_ip = None
(vpn_ip, vpn_port) = self.get_project_vpn_data(project)
if vpn_ip:
configfile = open(FLAGS.vpn_client_template, "r")
s = string.Template(configfile.read())
@ -659,10 +666,9 @@ class AuthManager(object):
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
logging.warn("No vpn data for project %s" %
pid)
logging.warn(_("No vpn data for project %s"), pid)

zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
zippy.close()
with open(zf, 'rb') as f:
read_buffer = f.read()
@ -670,38 +676,38 @@ class AuthManager(object):
shutil.rmtree(tmpdir)
return read_buffer

def get_environment_rc(self, user, project=None):
def get_environment_rc(self, user, project=None, use_dmz=True):
"""Get credential zip for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
project = user.id
pid = Project.safe_id(project)
return self.__generate_rc(user.access, user.secret, pid)
return self.__generate_rc(user.access, user.secret, pid, use_dmz)

@staticmethod
def __generate_rc(access, secret, pid):
def __generate_rc(access, secret, pid, use_dmz=True, host=None):
"""Generate rc file for user"""
if use_dmz:
cc_host = FLAGS.cc_dmz
else:
cc_host = FLAGS.cc_host
# NOTE(vish): Always use the dmz since it is used from inside the
#             instance
s3_host = FLAGS.s3_dmz
if host:
s3_host = host
cc_host = host
rc = open(FLAGS.credentials_template).read()
rc = rc % {'access': access,
'project': pid,
'secret': secret,
'ec2': FLAGS.ec2_url,
's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
cc_host,
FLAGS.cc_port,
FLAGS.ec2_suffix),
's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port),
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file}
return rc

def _generate_x509_cert(self, uid, pid):
"""Generate x509 cert for user"""
(private_key, csr) = crypto.generate_x509_cert(
self.__cert_subject(uid))
# TODO(joshua): This should be async call back to the cloud controller
signed_cert = crypto.sign_csr(csr, pid)
return (private_key, signed_cert)

@staticmethod
def __cert_subject(uid):
"""Helper to generate cert subject"""
return FLAGS.credential_cert_subject % (uid, utils.isotime())
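Since credential_rc_file now defaults to '%src', get_credentials writes one rc file per configured region instead of a single novarc. A sketch of the naming and of the 'name=host' parsing used above (illustrative; the fallback host value is an assumption):

def rc_filenames(region_list, template='%src'):
    """Map 'name=host' region entries to rc filenames in the zip."""
    regions = {}
    for item in region_list:
        region, _sep, region_host = item.partition('=')
        regions[region] = region_host
    if not regions:
        regions = {'nova': 'cc.example.com'}  # assumed default host
    return dict((template % region, host)
                for region, host in regions.items())


print(rc_filenames(['useast=api.east.example.com',
                    'uswest=api.west.example.com']))
# {'useastrc': 'api.east.example.com', 'uswestrc': 'api.west.example.com'}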
@ -1,7 +1,9 @@
#
# Person object for Nova
# inetorgperson with extra attributes
# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
# Schema version: 2
# Authors: Vishvananda Ishaya <vishvananda@gmail.com>
#          Ryan Lane <rlane@wikimedia.org>
#
#

@ -30,55 +32,19 @@ attributetype (
SINGLE-VALUE
)

attributetype (
novaAttrs:3
NAME 'keyFingerprint'
DESC 'Fingerprint of private key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)

attributetype (
novaAttrs:4
NAME 'isAdmin'
DESC 'Is user an administrator?'
NAME 'isNovaAdmin'
DESC 'Is user a nova administrator?'
EQUALITY booleanMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
SINGLE-VALUE
)

attributetype (
novaAttrs:5
NAME 'projectManager'
DESC 'Project Managers of a project'
SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
)

objectClass (
novaOCs:1
NAME 'novaUser'
DESC 'access and secret keys'
AUXILIARY
MUST ( uid )
MAY ( accessKey $ secretKey $ isAdmin )
)

objectClass (
novaOCs:2
NAME 'novaKeyPair'
DESC 'Key pair for User'
SUP top
STRUCTURAL
MUST ( cn $ sshPublicKey $ keyFingerprint )
)

objectClass (
novaOCs:3
NAME 'novaProject'
DESC 'Container for project'
SUP groupOfNames
STRUCTURAL
MUST ( cn $ projectManager )
MAY ( accessKey $ secretKey $ isNovaAdmin )
)
@ -1,16 +1,13 @@
#
# Person object for Nova
# inetorgperson with extra attributes
# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
# Modified for strict RFC 4512 compatibility by: Ryan Lane <ryan@ryandlane.com>
# Schema version: 2
# Authors: Vishvananda Ishaya <vishvananda@gmail.com>
#          Ryan Lane <rlane@wikimedia.org>
#
# using internet experimental oid arc as per BP64 3.1
dn: cn=schema
attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE)
attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )
objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) )
objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) )
objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) )
attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) )
@ -32,7 +32,6 @@ abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
schemapath='/var/opendj/instance/config/schema'
cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif
cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif
chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif
chown opendj:opendj $schemapath/98-nova_sun.ldif

cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
@ -22,7 +22,7 @@ apt-get install -y slapd ldap-utils python-ldap

abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema
cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema
cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema

mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
@ -33,7 +33,6 @@ cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
include /etc/ldap/schema/core.schema
include /etc/ldap/schema/cosine.schema
include /etc/ldap/schema/inetorgperson.schema
include /etc/ldap/schema/openssh-lpk_openldap.schema
include /etc/ldap/schema/nova.schema
pidfile /var/run/slapd/slapd.pid
argsfile /var/run/slapd/slapd.args
@ -1,63 +0,0 @@
#!/bin/bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This gets zipped and run on the cloudpipe-managed OpenVPN server

export SUPERVISOR="http://10.255.255.1:8773/cloudpipe"
export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{print $1}'`
export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $1}'`
export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $1}'`
export GATEWAY=`netstat -r | grep default | cut -d' ' -f10`
export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-vpn-$VPN_IP"

DHCP_LOWER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 10 }'`
DHCP_UPPER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 1 }'`

# generate a server DH
openssl dhparam -out /etc/openvpn/dh1024.pem 1024

# generate a server priv key
openssl genrsa -out /etc/openvpn/server.key 2048

# generate a server CSR
openssl req -new -key /etc/openvpn/server.key -out /etc/openvpn/server.csr -batch -subj "$SUBJ"

# URLEncode the CSR
CSRTEXT=`cat /etc/openvpn/server.csr`
CSRTEXT=$(python -c "import urllib; print urllib.quote('''$CSRTEXT''')")

# SIGN the csr and save as server.crt
# CURL fetch to the supervisor, POSTing the CSR text, saving the result as the CRT file
curl --fail $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt
curl --fail $SUPERVISOR/getca/ > /etc/openvpn/ca.crt

# Customize the server.conf.template
cd /etc/openvpn

sed -e s/VPN_IP/$VPN_IP/g server.conf.template > server.conf
sed -i -e s/DHCP_SUBNET/$DHCP_MASK/g server.conf
sed -i -e s/DHCP_LOWER/$DHCP_LOWER/g server.conf
sed -i -e s/DHCP_UPPER/$DHCP_UPPER/g server.conf
sed -i -e s/max-clients\ 1/max-clients\ 10/g server.conf

echo "\npush \"route 10.255.255.1 255.255.255.255 $GATEWAY\"\n" >> server.conf
echo "\npush \"route 10.255.255.253 255.255.255.255 $GATEWAY\"\n" >> server.conf
echo "\nduplicate-cn\n" >> server.conf

/etc/init.d/openvpn start
51
nova/cloudpipe/bootscript.template
Executable file
@ -0,0 +1,51 @@
#!/bin/bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This gets zipped and run on the cloudpipe-managed OpenVPN server

export LC_ALL=C
export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{print $$1}'`
export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $$1}'`
export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $$1}'`
export GATEWAY=`netstat -r | grep default | cut -d' ' -f10`

DHCP_LOWER=`echo $$BROADCAST | awk -F. '{print $$1"."$$2"."$$3"." $$4 - ${num_vpn} }'`
DHCP_UPPER=`echo $$BROADCAST | awk -F. '{print $$1"."$$2"."$$3"." $$4 - 1 }'`

# generate a server DH
openssl dhparam -out /etc/openvpn/dh1024.pem 1024

cp crl.pem /etc/openvpn/
cp server.key /etc/openvpn/
cp ca.crt /etc/openvpn/
cp server.crt /etc/openvpn/
# Customize the server.conf.template
cd /etc/openvpn

sed -e s/VPN_IP/$$VPN_IP/g server.conf.template > server.conf
sed -i -e s/DHCP_SUBNET/$$DHCP_MASK/g server.conf
sed -i -e s/DHCP_LOWER/$$DHCP_LOWER/g server.conf
sed -i -e s/DHCP_UPPER/$$DHCP_UPPER/g server.conf
sed -i -e s/max-clients\ 1/max-clients\ 10/g server.conf

echo "push \"route ${dmz_net} ${dmz_mask} $$GATEWAY\"" >> server.conf
echo "duplicate-cn" >> server.conf
echo "crl-verify /etc/openvpn/crl.pem" >> server.conf

/etc/init.d/openvpn start
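The template above escapes every shell dollar sign as $$ so that Python's string.Template substitutes only the ${num_vpn}, ${dmz_net} and ${dmz_mask} placeholders and leaves shell variables such as $BROADCAST alone. A minimal demonstration (illustrative):

import string

line = 'DHCP_LOWER=`echo $$BROADCAST | awk -F. \'{print $$4 - ${num_vpn} }\'`'
print(string.Template(line).substitute(num_vpn=10))
# DHCP_LOWER=`echo $BROADCAST | awk -F. '{print $4 - 10 }'`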
@ -22,13 +22,15 @@ an instance with it.

"""

import base64
import logging
import os
import string
import tempfile
import zipfile

from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import utils
@ -39,8 +41,17 @@ from nova.api.ec2 import cloud

FLAGS = flags.FLAGS
flags.DEFINE_string('boot_script_template',
utils.abspath('cloudpipe/bootscript.sh'),
'Template for script to run on cloudpipe instance boot')
utils.abspath('cloudpipe/bootscript.template'),
_('Template for script to run on cloudpipe instance boot'))
flags.DEFINE_string('dmz_net',
'10.0.0.0',
_('Network to push into openvpn config'))
flags.DEFINE_string('dmz_mask',
'255.255.255.0',
_('Netmask to push into openvpn config'))


LOG = logging.getLogger('nova-cloudpipe')


class CloudPipe(object):
@ -48,64 +59,96 @@ class CloudPipe(object):
self.controller = cloud.CloudController()
self.manager = manager.AuthManager()

def launch_vpn_instance(self, project_id):
logging.debug("Launching VPN for %s" % (project_id))
project = self.manager.get_project(project_id)
def get_encoded_zip(self, project_id):
# Make a payload.zip
tmpfolder = tempfile.mkdtemp()
filename = "payload.zip"
zippath = os.path.join(tmpfolder, filename)
z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED)

z.write(FLAGS.boot_script_template, 'autorun.sh')
shellfile = open(FLAGS.boot_script_template, "r")
s = string.Template(shellfile.read())
shellfile.close()
boot_script = s.substitute(cc_dmz=FLAGS.cc_dmz,
cc_port=FLAGS.cc_port,
dmz_net=FLAGS.dmz_net,
dmz_mask=FLAGS.dmz_mask,
num_vpn=FLAGS.cnt_vpn_clients)
# genvpn, sign csr
crypto.generate_vpn_files(project_id)
z.writestr('autorun.sh', boot_script)
crl = os.path.join(crypto.ca_folder(project_id), 'crl.pem')
z.write(crl, 'crl.pem')
server_key = os.path.join(crypto.ca_folder(project_id), 'server.key')
z.write(server_key, 'server.key')
ca_crt = os.path.join(crypto.ca_path(project_id))
z.write(ca_crt, 'ca.crt')
server_crt = os.path.join(crypto.ca_folder(project_id), 'server.crt')
z.write(server_crt, 'server.crt')
z.close()

key_name = self.setup_key_pair(project.project_manager_id, project_id)
zippy = open(zippath, "r")
context = context.RequestContext(user=project.project_manager,
project=project)
# NOTE(vish): run instances expects encoded userdata, it is decoded
# in the get_metadata_call. autorun.sh also decodes the zip file,
# hence the double encoding.
encoded = zippy.read().encode("base64").encode("base64")
zippy.close()
return encoded

reservation = self.controller.run_instances(context,
# Run instances expects encoded userdata, it is decoded in the
# get_metadata_call. autorun.sh also decodes the zip file, hence
# the double encoding.
user_data=zippy.read().encode("base64").encode("base64"),
def launch_vpn_instance(self, project_id):
LOG.debug(_("Launching VPN for %s") % (project_id))
project = self.manager.get_project(project_id)
ctxt = context.RequestContext(user=project.project_manager,
project=project)
key_name = self.setup_key_pair(ctxt)
group_name = self.setup_security_group(ctxt)

reservation = self.controller.run_instances(ctxt,
user_data=self.get_encoded_zip(project_id),
max_count=1,
min_count=1,
instance_type='m1.tiny',
image_id=FLAGS.vpn_image_id,
key_name=key_name,
security_groups=["vpn-secgroup"])
zippy.close()
security_group=[group_name])

def setup_key_pair(self, user_id, project_id):
key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix)
def setup_security_group(self, context):
group_name = '%s%s' % (context.project.id, FLAGS.vpn_key_suffix)
if db.security_group_exists(context, context.project.id, group_name):
return group_name
group = {'user_id': context.user.id,
'project_id': context.project.id,
'name': group_name,
'description': 'Group for vpn'}
group_ref = db.security_group_create(context, group)
rule = {'parent_group_id': group_ref['id'],
'cidr': '0.0.0.0/0',
'protocol': 'udp',
'from_port': 1194,
'to_port': 1194}
db.security_group_rule_create(context, rule)
rule = {'parent_group_id': group_ref['id'],
'cidr': '0.0.0.0/0',
'protocol': 'icmp',
'from_port': -1,
'to_port': -1}
db.security_group_rule_create(context, rule)
# NOTE(vish): No need to trigger the group since the instance
#             has not been run yet.
return group_name

def setup_key_pair(self, context):
key_name = '%s%s' % (context.project.id, FLAGS.vpn_key_suffix)
try:
private_key, fingerprint = self.manager.generate_key_pair(user_id,
key_name)
result = cloud._gen_key(context, context.user.id, key_name)
private_key = result['private_key']
try:
key_dir = os.path.join(FLAGS.keys_path, user_id)
key_dir = os.path.join(FLAGS.keys_path, context.user.id)
if not os.path.exists(key_dir):
os.makedirs(key_dir)
file_name = os.path.join(key_dir, '%s.pem' % key_name)
with open(file_name, 'w') as f:
key_path = os.path.join(key_dir, '%s.pem' % key_name)
with open(key_path, 'w') as f:
f.write(private_key)
except:
pass
except exception.Duplicate:
pass
return key_name

# def setup_secgroups(self, username):
#     conn = self.euca.connection_for(username)
#     try:
#         secgroup = conn.create_security_group("vpn-secgroup",
#                                               "vpn-secgroup")
#         secgroup.authorize(ip_protocol = "udp", from_port = "1194",
#                            to_port = "1194", cidr_ip = "0.0.0.0/0")
#         secgroup.authorize(ip_protocol = "tcp", from_port = "80",
#                            to_port = "80", cidr_ip = "0.0.0.0/0")
#         secgroup.authorize(ip_protocol = "tcp", from_port = "22",
#                            to_port = "22", cidr_ip = "0.0.0.0/0")
#     except:
#         pass
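get_encoded_zip base64-encodes the payload twice because run_instances expects user data to arrive already encoded once (the metadata path strips one layer), and autorun.sh decodes the zip itself on the instance. A round-trip sketch of that contract (illustrative; the diff itself uses the Python 2 str.encode('base64') idiom):

import base64

payload = b'PK...'  # stand-in for the zipped bytes
wire = base64.b64encode(base64.b64encode(payload))  # as sent in user_data
# one decode in the metadata path, one in autorun.sh
assert base64.b64decode(base64.b64decode(wire)) == payload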
@ -53,10 +53,29 @@ class ComputeAPI(base.Base):
self.image_service = image_service
super(ComputeAPI, self).__init__(**kwargs)

def get_network_topic(self, context, instance_id):
try:
instance = self.db.instance_get_by_internal_id(context,
instance_id)
except exception.NotFound as e:
logging.warning("Instance %d was not found in get_network_topic",
instance_id)
raise e

host = instance['host']
if not host:
raise exception.Error("Instance %d has no host" % instance_id)
topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
return rpc.call(context,
topic,
{"method": "get_network_topic", "args": {'fake': 1}})

def create_instances(self, context, instance_type, image_id, min_count=1,
max_count=1, kernel_id=None, ramdisk_id=None,
display_name='', description='', key_name=None,
key_data=None, security_group='default',
availability_zone=None,
user_data=None,
generate_hostname=generate_default_hostname):
"""Create the number of instances requested if quota and
other arguments check out ok."""
@ -74,13 +93,19 @@ class ComputeAPI(base.Base):
if not is_vpn:
image = self.image_service.show(context, image_id)
if kernel_id is None:
kernel_id = image.get('kernelId', FLAGS.default_kernel)
kernel_id = image.get('kernelId', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)

# Make sure we have access to kernel and ramdisk
self.image_service.show(context, kernel_id)
self.image_service.show(context, ramdisk_id)
ramdisk_id = image.get('ramdiskId', None)
# No kernel and ramdisk for raw images
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None
ramdisk_id = None
logging.debug("Creating a raw instance")
# Make sure we have access to kernel and ramdisk (if not raw)
if kernel_id:
self.image_service.show(context, kernel_id)
if ramdisk_id:
self.image_service.show(context, ramdisk_id)

if security_group is None:
security_group = ['default']
@ -103,8 +128,8 @@ class ComputeAPI(base.Base):
base_options = {
'reservation_id': utils.generate_uid('r'),
'image_id': image_id,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'state_description': 'scheduling',
'user_id': context.user_id,
'project_id': context.project_id,
@ -115,12 +140,14 @@ class ComputeAPI(base.Base):
'local_gb': type_data['local_gb'],
'display_name': display_name,
'display_description': description,
'user_data': user_data or '',
'key_name': key_name,
'key_data': key_data}
'key_data': key_data,
'availability_zone': availability_zone}

elevated = context.elevated()
instances = []
logging.debug("Going to run %s instances...", num_instances)
logging.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
@ -145,19 +172,7 @@ class ComputeAPI(base.Base):
instance = self.update_instance(context, instance_id, **updates)
instances.append(instance)

# TODO(vish): This probably should be done in the scheduler
#             or in compute as a call. The network should be
#             allocated after the host is assigned and setup
#             can happen at the same time.
address = self.network_manager.allocate_fixed_ip(context,
instance_id,
is_vpn)
rpc.cast(elevated,
self._get_network_topic(context),
{"method": "setup_fixed_ip",
"args": {"address": address}})

logging.debug("Casting to scheduler for %s/%s's instance %s",
logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
context.project_id, context.user_id, instance_id)
rpc.cast(context,
FLAGS.scheduler_topic,
@ -204,12 +219,12 @@ class ComputeAPI(base.Base):
instance = self.db.instance_get_by_internal_id(context,
instance_id)
except exception.NotFound as e:
logging.warning("Instance %d was not found during terminate",
logging.warning(_("Instance %d was not found during terminate"),
instance_id)
raise e

if (instance['state_description'] == 'terminating'):
logging.warning("Instance %d is already being terminated",
logging.warning(_("Instance %d is already being terminated"),
instance_id)
return

@ -219,28 +234,6 @@ class ComputeAPI(base.Base):
state=0,
terminated_at=datetime.datetime.utcnow())

# FIXME(ja): where should network deallocate occur?
address = self.db.instance_get_floating_address(context,
instance['id'])
if address:
logging.debug("Disassociating address %s" % address)
# NOTE(vish): Right now we don't really care if the ip is
#             disassociated. We may need to worry about
#             checking this later. Perhaps in the scheduler?
rpc.cast(context,
self._get_network_topic(context),
{"method": "disassociate_floating_ip",
"args": {"floating_address": address}})

address = self.db.instance_get_fixed_address(context, instance['id'])
if address:
logging.debug("Deallocating address %s" % address)
# NOTE(vish): Currently, nothing needs to be done on the
#             network node until release. If this changes,
#             we will need to cast here.
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)

host = instance['host']
if host:
rpc.cast(context,
@ -266,6 +259,15 @@ class ComputeAPI(base.Base):
def get_instance(self, context, instance_id):
return self.db.instance_get_by_internal_id(context, instance_id)

def snapshot(self, context, instance_id, name):
"""Snapshot the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "snapshot_instance",
"args": {"instance_id": instance['id'], "name": name}})

def reboot(self, context, instance_id):
"""Reboot the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
@ -275,6 +277,56 @@ class ComputeAPI(base.Base):
{"method": "reboot_instance",
"args": {"instance_id": instance['id']}})

def pause(self, context, instance_id):
"""Pause the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "pause_instance",
"args": {"instance_id": instance['id']}})

def unpause(self, context, instance_id):
"""Unpause the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unpause_instance",
"args": {"instance_id": instance['id']}})

def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance["host"]
return rpc.call(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "get_diagnostics",
"args": {"instance_id": instance["id"]}})

def get_actions(self, context, instance_id):
"""Retrieve actions for the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
return self.db.instance_get_actions(context, instance["id"])

def suspend(self, context, instance_id):
"""suspend the instance with instance_id"""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "suspend_instance",
"args": {"instance_id": instance['id']}})

def resume(self, context, instance_id):
"""resume the instance with instance_id"""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "resume_instance",
"args": {"instance_id": instance['id']}})

def rescue(self, context, instance_id):
"""Rescue the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
@ -292,14 +344,3 @@ class ComputeAPI(base.Base):
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unrescue_instance",
"args": {"instance_id": instance['id']}})

def _get_network_topic(self, context):
"""Retrieves the network host for a project"""
network_ref = self.network_manager.get_network(context)
host = network_ref['host']
if not host:
host = rpc.call(context,
FLAGS.network_topic,
{"method": "set_network_host",
"args": {"network_id": network_ref['id']}})
return self.db.queue_get_for(context, FLAGS.network_topic, host)
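Every new method above (snapshot, pause, unpause, suspend, resume, rescue) repeats the same three steps: resolve the instance, resolve the per-host compute queue, and cast a one-way message. A condensed sketch of that shared pattern (illustrative; cast_to_instance_host is not a helper in the commit, and db/rpc are passed in rather than imported):

def cast_to_instance_host(db, rpc, context, compute_topic, instance_id,
                          method):
    """Fire-and-forget RPC to the compute node owning the instance."""
    instance = db.instance_get_by_internal_id(context, instance_id)
    topic = db.queue_get_for(context, compute_topic, instance['host'])
    rpc.cast(context, topic,
             {"method": method,
              "args": {"instance_id": instance['id']}})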
@ -26,8 +26,6 @@ import logging
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
|
||||
@ -39,7 +37,6 @@ flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
|
||||
'block_size to use for dd')
|
||||
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def partition(infile, outfile, local_bytes=0, resize=True,
|
||||
local_type='ext2', execute=None):
|
||||
"""
|
||||
@ -64,18 +61,18 @@ def partition(infile, outfile, local_bytes=0, resize=True,
|
||||
file_size = os.path.getsize(infile)
|
||||
if resize and file_size < FLAGS.minimum_root_size:
|
||||
last_sector = FLAGS.minimum_root_size / sector_size - 1
|
||||
yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
|
||||
% (infile, last_sector, sector_size))
|
||||
yield execute('e2fsck -fp %s' % infile, check_exit_code=False)
|
||||
yield execute('resize2fs %s' % infile)
|
||||
execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
|
||||
% (infile, last_sector, sector_size))
|
||||
execute('e2fsck -fp %s' % infile, check_exit_code=False)
|
||||
execute('resize2fs %s' % infile)
|
||||
file_size = FLAGS.minimum_root_size
|
||||
elif file_size % sector_size != 0:
|
||||
logging.warn("Input partition size not evenly divisible by"
|
||||
" sector size: %d / %d", file_size, sector_size)
|
||||
logging.warn(_("Input partition size not evenly divisible by"
|
||||
" sector size: %d / %d"), file_size, sector_size)
|
||||
primary_sectors = file_size / sector_size
|
||||
if local_bytes % sector_size != 0:
|
||||
logging.warn("Bytes for local storage not evenly divisible"
|
||||
" by sector size: %d / %d", local_bytes, sector_size)
|
||||
logging.warn(_("Bytes for local storage not evenly divisible"
|
||||
" by sector size: %d / %d"), local_bytes, sector_size)
|
||||
local_sectors = local_bytes / sector_size
|
||||
|
||||
mbr_last = 62 # a
|
||||
@ -86,30 +83,36 @@ def partition(infile, outfile, local_bytes=0, resize=True,
|
||||
last_sector = local_last # e
|
||||
|
||||
# create an empty file
|
||||
yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
|
||||
% (outfile, mbr_last, sector_size))
|
||||
execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
|
||||
% (outfile, mbr_last, sector_size))
|
||||
|
||||
# make mbr partition
|
||||
yield execute('parted --script %s mklabel msdos' % outfile)
|
||||
execute('parted --script %s mklabel msdos' % outfile)
|
||||
|
||||
# append primary file
|
||||
yield execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append'
|
||||
% (infile, outfile, FLAGS.block_size))
|
||||
execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append'
|
||||
% (infile, outfile, FLAGS.block_size))
|
||||
|
||||
# make primary partition
|
||||
yield execute('parted --script %s mkpart primary %ds %ds'
|
||||
% (outfile, primary_first, primary_last))
|
||||
execute('parted --script %s mkpart primary %ds %ds'
|
||||
% (outfile, primary_first, primary_last))
|
||||
|
||||
if local_bytes > 0:
|
||||
# make the file bigger
|
||||
yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
|
||||
% (outfile, last_sector, sector_size))
|
||||
execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
|
||||
% (outfile, last_sector, sector_size))
|
||||
# make and format local partition
|
||||
yield execute('parted --script %s mkpartfs primary %s %ds %ds'
|
||||
% (outfile, local_type, local_first, local_last))
|
||||
execute('parted --script %s mkpartfs primary %s %ds %ds'
|
||||
% (outfile, local_type, local_first, local_last))
|
||||
|
||||
|
||||
def extend(image, size, execute):
|
||||
file_size = os.path.getsize(image)
|
||||
if file_size >= size:
|
||||
return
|
||||
return execute('truncate -s size %s' % (image,))


@defer.inlineCallbacks
def inject_data(image, key=None, net=None, partition=None, execute=None):
"""Injects a ssh key and optionally net data into a disk image.

@ -119,74 +122,83 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
If partition is not specified it mounts the image as a single partition.

"""
out, err = yield execute('sudo losetup -f --show %s' % image)
out, err = execute('sudo losetup --find --show %s' % image)
if err:
raise exception.Error('Could not attach image to loopback: %s' % err)
raise exception.Error(_('Could not attach image to loopback: %s')
% err)
device = out.strip()
try:
if not partition is None:
# create partition
out, err = yield execute('sudo kpartx -a %s' % device)
out, err = execute('sudo kpartx -a %s' % device)
if err:
raise exception.Error('Failed to load partition: %s' % err)
raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
partition)
else:
mapped_device = device
out, err = yield execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)

# We can only loopback mount raw images. If the device isn't there,
# it's normally because it's a .vmdk or a .vdi etc
if not os.path.exists(mapped_device):
raise exception.Error('Mapped device was not found (we can'
' only inject raw disk images): %s' %
mapped_device)

# Configure ext2fs so that it doesn't auto-check every N boots
out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)

tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
out, err = yield execute(
out, err = execute(
'sudo mount %s %s' % (mapped_device, tmpdir))
if err:
raise exception.Error('Failed to mount filesystem: %s' % err)
raise exception.Error(_('Failed to mount filesystem: %s')
% err)

try:
if key:
# inject key file
yield _inject_key_into_fs(key, tmpdir, execute=execute)
_inject_key_into_fs(key, tmpdir, execute=execute)
if net:
yield _inject_net_into_fs(net, tmpdir, execute=execute)
_inject_net_into_fs(net, tmpdir, execute=execute)
finally:
# unmount device
yield execute('sudo umount %s' % mapped_device)
execute('sudo umount %s' % mapped_device)
finally:
# remove temporary directory
yield execute('rmdir %s' % tmpdir)
execute('rmdir %s' % tmpdir)
if not partition is None:
# remove partitions
yield execute('sudo kpartx -d %s' % device)
execute('sudo kpartx -d %s' % device)
finally:
# remove loopback
yield execute('sudo losetup -d %s' % device)
execute('sudo losetup --detach %s' % device)


@defer.inlineCallbacks
def _inject_key_into_fs(key, fs, execute=None):
"""Add the given public ssh key to root's authorized_keys.

key is an ssh key string.
fs is the path to the base of the filesystem into which to inject the key.
"""
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
yield execute('sudo mkdir -p %s' % sshdir)  # existing dir doesn't matter
yield execute('sudo chown root %s' % sshdir)
yield execute('sudo chmod 700 %s' % sshdir)
sshdir = os.path.join(fs, 'root', '.ssh')
execute('sudo mkdir -p %s' % sshdir)  # existing dir doesn't matter
execute('sudo chown root %s' % sshdir)
execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')


@defer.inlineCallbacks
def _inject_net_into_fs(net, fs, execute=None):
"""Inject /etc/network/interfaces into the filesystem rooted at fs.

net is the contents of /etc/network/interfaces.
"""
netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
yield execute('sudo mkdir -p %s' % netdir)  # existing dir doesn't matter
yield execute('sudo chown root:root %s' % netdir)
yield execute('sudo chmod 755 %s' % netdir)
execute('sudo mkdir -p %s' % netdir)  # existing dir doesn't matter
execute('sudo chown root:root %s' % netdir)
execute('sudo chmod 755 %s' % netdir)
netfile = os.path.join(netdir, 'interfaces')
yield execute('sudo tee %s' % netfile, net)
execute('sudo tee %s' % netfile, net)

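The hunks above drop the Twisted deferred style (yield execute(...) under @defer.inlineCallbacks) in favor of plain synchronous calls. A hedged usage sketch of the synchronous form, reusing nova's utils.execute, which returns an (stdout, stderr) tuple; the file paths are illustrative only:

from nova import utils

key = open('/tmp/id_rsa.pub').read()
net = open('/tmp/interfaces').read()
# mounts partition 1 of a raw image, injects the key and network
# config, then unmounts and detaches the loopback device
inject_data('/tmp/disk.img', key=key, net=net, partition=1,
            execute=utils.execute)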
@ -38,7 +38,8 @@ def get_by_type(instance_type):
if instance_type is None:
return FLAGS.default_instance_type
if instance_type not in INSTANCE_TYPES:
raise exception.ApiError("Unknown instance type: %s" % instance_type)
raise exception.ApiError(_("Unknown instance type: %s") %
instance_type)
return instance_type



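For reference, the resulting behavior of get_by_type() (values illustrative; 'm1.small' is assumed to be a key in INSTANCE_TYPES):

get_by_type(None)        # returns FLAGS.default_instance_type
get_by_type('m1.small')  # returns 'm1.small' unchanged
get_by_type('bogus')     # raises exception.ApiError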
@ -37,11 +37,10 @@ terminating it.
import datetime
import logging

from twisted.internet import defer

from nova import exception
from nova import flags
from nova import manager
from nova import rpc
from nova import utils
from nova.compute import power_state

@ -50,6 +49,8 @@ flags.DEFINE_string('instances_path', '$state_path/instances',
'where instances are stored on disk')
flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
'Driver to use for controlling virtualization')
flags.DEFINE_string('stub_network', False,
'Stub network related code')


class ComputeManager(manager.Manager):
@ -67,6 +68,12 @@ class ComputeManager(manager.Manager):
self.volume_manager = utils.import_object(FLAGS.volume_manager)
super(ComputeManager, self).__init__(*args, **kwargs)

def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
self.driver.init_host()

def _update_state(self, context, instance_id):
"""Update the state of an instance from the driver info."""
# FIXME(ja): include other fields from state?
@ -78,26 +85,57 @@ class ComputeManager(manager.Manager):
state = power_state.NOSTATE
self.db.instance_set_state(context, instance_id, state)

@defer.inlineCallbacks
def get_network_topic(self, context, **_kwargs):
"""Retrieves the network host for a project on this host"""
# TODO(vish): This method should be memoized. This will make
# the call to get_network_host cheaper, so that
# it can pass messages instead of checking the db
# locally.
if FLAGS.stub_network:
host = FLAGS.network_host
else:
host = self.network_manager.get_network_host(context)
return self.db.queue_get_for(context,
FLAGS.network_topic,
host)

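get_network_topic() resolves the host running the project's network service and hands back a host-scoped queue name via db.queue_get_for(); conventionally that is a string like 'network.<host>', though the exact format lives in the db layer, not in this hunk. A sketch of how run_instance() below consumes it:

topic = self.get_network_topic(context)  # e.g. 'network.host1' (assumed)
address = rpc.call(context, topic,
                   {"method": "allocate_fixed_ip",
                    "args": {"instance_id": instance_id,
                             "vpn": False}})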
@exception.wrap_exception
def refresh_security_group(self, context, security_group_id, **_kwargs):
"""This call passes straight through to the virtualization driver."""
yield self.driver.refresh_security_group(security_group_id)
self.driver.refresh_security_group(security_group_id)

@defer.inlineCallbacks
@exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error("Instance has already been created")
logging.debug("instance %s: starting...", instance_id)
self.network_manager.setup_compute_network(context, instance_id)
raise exception.Error(_("Instance has already been created"))
logging.debug(_("instance %s: starting..."), instance_id)
self.db.instance_update(context,
instance_id,
{'host': self.host})

self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
'networking')

is_vpn = instance_ref['image_id'] == FLAGS.vpn_image_id
# NOTE(vish): This could be a cast because we don't do anything
# with the address currently, but I'm leaving it as
# a call to ensure that network setup completes.  We
# will eventually also need to save the address here.
if not FLAGS.stub_network:
address = rpc.call(context,
self.get_network_topic(context),
{"method": "allocate_fixed_ip",
"args": {"instance_id": instance_id,
"vpn": is_vpn}})

self.network_manager.setup_compute_network(context,
instance_id)

# TODO(vish) check to make sure the availability zone matches
self.db.instance_set_state(context,
instance_id,
@ -105,13 +143,13 @@ class ComputeManager(manager.Manager):
'spawning')

try:
yield self.driver.spawn(instance_ref)
self.driver.spawn(instance_ref)
now = datetime.datetime.utcnow()
self.db.instance_update(context,
instance_id,
{'launched_at': now})
except Exception:  # pylint: disable-msg=W0702
logging.exception("instance %s: Failed to spawn",
logging.exception(_("instance %s: Failed to spawn"),
instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
@ -119,104 +157,231 @@ class ComputeManager(manager.Manager):

self._update_state(context, instance_id)
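The happy path of run_instance(), condensed as a sketch of the code above (no additional behavior):

self.db.instance_set_state(context, instance_id,
                           power_state.NOSTATE, 'networking')
# ... rpc.call allocate_fixed_ip, setup_compute_network ...
self.db.instance_set_state(context, instance_id,
                           power_state.NOSTATE, 'spawning')
self.driver.spawn(instance_ref)
self._update_state(context, instance_id)  # re-read real power state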

@defer.inlineCallbacks
@exception.wrap_exception
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
context = context.elevated()
logging.debug("instance %s: terminating", instance_id)

instance_ref = self.db.instance_get(context, instance_id)

if not FLAGS.stub_network:
address = self.db.instance_get_floating_address(context,
instance_ref['id'])
if address:
logging.debug(_("Disassociating address %s") % address)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated.  We may need to worry about
# checking this later.
rpc.cast(context,
self.get_network_topic(context),
{"method": "disassociate_floating_ip",
"args": {"floating_address": address}})

address = self.db.instance_get_fixed_address(context,
instance_ref['id'])
if address:
logging.debug(_("Deallocating address %s") % address)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)

logging.debug(_("instance %s: terminating"), instance_id)

volumes = instance_ref.get('volumes', []) or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
if instance_ref['state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
raise exception.Error('trying to destroy already destroyed'
' instance: %s' % instance_id)
yield self.driver.destroy(instance_ref)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
self.driver.destroy(instance_ref)

# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)

@defer.inlineCallbacks
@exception.wrap_exception
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self._update_state(context, instance_id)
instance_ref = self.db.instance_get(context, instance_id)

if instance_ref['state'] != power_state.RUNNING:
logging.warn('trying to reboot a non-running '
'instance: %s (state: %s expected: %s)',
logging.warn(_('trying to reboot a non-running '
'instance: %s (state: %s expected: %s)'),
instance_ref['internal_id'],
instance_ref['state'],
power_state.RUNNING)

logging.debug('instance %s: rebooting', instance_ref['name'])
logging.debug(_('instance %s: rebooting'), instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
'rebooting')
yield self.driver.reboot(instance_ref)
self.network_manager.setup_compute_network(context, instance_id)
self.driver.reboot(instance_ref)
self._update_state(context, instance_id)

@defer.inlineCallbacks
@exception.wrap_exception
def snapshot_instance(self, context, instance_id, name):
"""Snapshot an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)

#NOTE(sirp): update_state currently only refreshes the state field
# if we add is_snapshotting, we will need this refreshed too,
# potentially?
self._update_state(context, instance_id)

logging.debug(_('instance %s: snapshotting'), instance_ref['name'])
if instance_ref['state'] != power_state.RUNNING:
logging.warn(_('trying to snapshot a non-running '
'instance: %s (state: %s expected: %s)'),
instance_ref['internal_id'],
instance_ref['state'],
power_state.RUNNING)

self.driver.snapshot(instance_ref, name)

@exception.wrap_exception
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)

logging.debug('instance %s: rescuing',
logging.debug(_('instance %s: rescuing'),
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
'rescuing')
yield self.driver.rescue(instance_ref)
self.network_manager.setup_compute_network(context, instance_id)
self.driver.rescue(instance_ref)
self._update_state(context, instance_id)

@defer.inlineCallbacks
@exception.wrap_exception
def unrescue_instance(self, context, instance_id):
"""Unrescue an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)

logging.debug('instance %s: unrescuing',
logging.debug(_('instance %s: unrescuing'),
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
'unrescuing')
yield self.driver.unrescue(instance_ref)
self.driver.unrescue(instance_ref)
self._update_state(context, instance_id)

@staticmethod
def _update_state_callback(self, context, instance_id, result):
"""Update instance state when async task completes."""
self._update_state(context, instance_id)

@exception.wrap_exception
def pause_instance(self, context, instance_id):
"""Pause an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)

logging.debug('instance %s: pausing',
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
'pausing')
self.driver.pause(instance_ref,
lambda result: self._update_state_callback(self,
context,
instance_id,
result))

@exception.wrap_exception
def unpause_instance(self, context, instance_id):
"""Unpause a paused instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)

logging.debug('instance %s: unpausing',
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
'unpausing')
self.driver.unpause(instance_ref,
lambda result: self._update_state_callback(self,
context,
instance_id,
result))

@exception.wrap_exception
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this server."""
instance_ref = self.db.instance_get(context, instance_id)

if instance_ref["state"] == power_state.RUNNING:
logging.debug(_("instance %s: retrieving diagnostics"),
instance_ref["internal_id"])
return self.driver.get_diagnostics(instance_ref)

@exception.wrap_exception
def suspend_instance(self, context, instance_id):
"""suspend the instance with instance_id"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)

logging.debug(_('instance %s: suspending'),
instance_ref['internal_id'])
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'suspending')
self.driver.suspend(instance_ref,
lambda result: self._update_state_callback(self,
context,
instance_id,
result))

@exception.wrap_exception
def resume_instance(self, context, instance_id):
"""resume the suspended instance with instance_id"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)

logging.debug(_('instance %s: resuming'), instance_ref['internal_id'])
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'resuming')
self.driver.resume(instance_ref,
lambda result: self._update_state_callback(self,
context,
instance_id,
result))

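pause/unpause/suspend/resume all share one callback idiom: the driver completes asynchronously and the lambda funnels the result into _update_state_callback(). The same shape, spelled out as a sketch:

def _on_done(result):
    # refresh db power state once the driver-side task finishes
    self._update_state_callback(self, context, instance_id, result)

self.driver.pause(instance_ref, _on_done)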
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
logging.debug("instance %s: getting console output", instance_id)
logging.debug(_("instance %s: getting console output"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)

return self.driver.get_console_output(instance_ref)

@defer.inlineCallbacks
@exception.wrap_exception
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
logging.debug("instance %s: attaching volume %s to %s", instance_id,
logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
volume_id, mountpoint)
instance_ref = self.db.instance_get(context, instance_id)
dev_path = yield self.volume_manager.setup_compute_volume(context,
volume_id)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
try:
yield self.driver.attach_volume(instance_ref['name'],
dev_path,
mountpoint)
self.driver.attach_volume(instance_ref['name'],
dev_path,
mountpoint)
self.db.volume_attached(context,
volume_id,
instance_id,
@ -225,29 +390,29 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# exception below.
logging.exception("instance %s: attach failed %s, removing",
logging.exception(_("instance %s: attach failed %s, removing"),
instance_id, mountpoint)
yield self.volume_manager.remove_compute_volume(context,
volume_id)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
defer.returnValue(True)

@defer.inlineCallbacks
return True

@exception.wrap_exception
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
context = context.elevated()
logging.debug("instance %s: detaching volume %s",
logging.debug(_("instance %s: detaching volume %s"),
instance_id,
volume_id)
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
if instance_ref['name'] not in self.driver.list_instances():
logging.warn("Detaching volume from unknown instance %s",
logging.warn(_("Detaching volume from unknown instance %s"),
instance_ref['name'])
else:
yield self.driver.detach_volume(instance_ref['name'],
volume_ref['mountpoint'])
yield self.volume_manager.remove_compute_volume(context, volume_id)
self.driver.detach_volume(instance_ref['name'],
volume_ref['mountpoint'])
self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
defer.returnValue(True)
return True

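With the deferreds gone, attach_volume() and detach_volume() return True directly instead of defer.returnValue(True), so callers simply invoke them synchronously. A sketch (the manager variable and mountpoint are illustrative):

# before: result = yield manager.attach_volume(...)
result = manager.attach_volume(context, instance_id, volume_id,
                               '/dev/vdc')
assert result is True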
@ -255,7 +255,7 @@ class Instance(object):
Updates the instances statistics and stores the resulting graphs
in the internal object store on the cloud controller.
"""
logging.debug('updating %s...', self.instance_id)
logging.debug(_('updating %s...'), self.instance_id)

try:
data = self.fetch_cpu_stats()
@ -285,7 +285,7 @@ class Instance(object):
graph_disk(self, '1w')
graph_disk(self, '1m')
except Exception:
logging.exception('unexpected error during update')
logging.exception(_('unexpected error during update'))

self.last_updated = utcnow()

@ -351,7 +351,7 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
logging.error('Cannot get blockstats for "%s" on "%s"',
logging.error(_('Cannot get blockstats for "%s" on "%s"'),
disk, self.instance_id)
raise

@ -373,7 +373,7 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
logging.error('Cannot get ifstats for "%s" on "%s"',
logging.error(_('Cannot get ifstats for "%s" on "%s"'),
interface, self.instance_id)
raise

@ -408,7 +408,7 @@ class InstanceMonitor(object, service.Service):
try:
conn = virt_connection.get_connection(read_only=True)
except Exception, exn:
logging.exception('unexpected exception getting connection')
logging.exception(_('unexpected exception getting connection'))
time.sleep(FLAGS.monitoring_instances_delay)
return

@ -423,7 +423,7 @@ class InstanceMonitor(object, service.Service):
if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
logging.debug('Found instance: %s', domain_id)
logging.debug(_('Found instance: %s'), domain_id)

for key in self._instances.keys():
instance = self._instances[key]

@ -26,6 +26,7 @@ PAUSED = 0x03
SHUTDOWN = 0x04
SHUTOFF = 0x05
CRASHED = 0x06
SUSPENDED = 0x07


def name(code):
@ -36,5 +37,6 @@ def name(code):
PAUSED: 'paused',
SHUTDOWN: 'shutdown',
SHUTOFF: 'shutdown',
CRASHED: 'crashed'}
CRASHED: 'crashed',
SUSPENDED: 'suspended'}
return d[code]

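Illustrative lookups after the new SUSPENDED state is added; note that SHUTOFF intentionally shares SHUTDOWN's display name:

name(SUSPENDED)  # -> 'suspended'
name(SHUTOFF)    # -> 'shutdown'
name(SHUTDOWN)   # -> 'shutdown'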
196 nova/crypto.py
@ -19,10 +19,10 @@
Wrappers around standard crypto data elements.

Includes root and intermediate CAs, SSH key_pairs and x509 certificates.

"""

import base64
import gettext
import hashlib
import logging
import os
@ -34,28 +34,59 @@ import utils

import M2Crypto

from nova import exception
gettext.install('nova', unicode=1)

from nova import context
from nova import db
from nova import flags


FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA'))
flags.DEFINE_string('key_file',
os.path.join('private', 'cakey.pem'),
_('Filename of private key'))
flags.DEFINE_string('crl_file', 'crl.pem',
_('Filename of root Certificate Revocation List'))
flags.DEFINE_string('keys_path', '$state_path/keys',
'Where we keep our keys')
_('Where we keep our keys'))
flags.DEFINE_string('ca_path', '$state_path/CA',
'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False,
'Should we use intermediate CAs for each project?')
_('Where we keep our root CA'))
flags.DEFINE_boolean('use_project_ca', False,
_('Should we use a CA for each project?'))
flags.DEFINE_string('user_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s-%s',
_('Subject for certificate for users, '
'%s for project, user, timestamp'))
flags.DEFINE_string('project_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=project-ca-%s-%s',
_('Subject for certificate for projects, '
'%s for project, timestamp'))
flags.DEFINE_string('vpn_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=project-vpn-%s-%s',
_('Subject for certificate for vpns, '
'%s for project, timestamp'))


def ca_path(project_id):
if project_id:
return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
return "%s/cacert.pem" % (FLAGS.ca_path)
def ca_folder(project_id=None):
if FLAGS.use_project_ca and project_id:
return os.path.join(FLAGS.ca_path, 'projects', project_id)
return FLAGS.ca_path


def ca_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.ca_file)


def key_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.key_file)


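The resulting on-disk layout, as a sketch. This assumes FLAGS.ca_path expands to /var/lib/nova/CA and use_project_ca is True; with use_project_ca False every call collapses to the root CA paths:

ca_folder()          # -> '/var/lib/nova/CA'
ca_folder('proj-1')  # -> '/var/lib/nova/CA/projects/proj-1'
ca_path('proj-1')    # -> '/var/lib/nova/CA/projects/proj-1/cacert.pem'
key_path()           # -> '/var/lib/nova/CA/private/cakey.pem'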
def fetch_ca(project_id=None, chain=True):
if not FLAGS.use_intermediate_ca:
if not FLAGS.use_project_ca:
project_id = None
buffer = ""
if project_id:
@ -92,8 +123,8 @@ def generate_key_pair(bits=1024):


def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
pub_key_buffer = M2Crypto.BIO.MemoryBuffer(ssl_public_key)
rsa_key = M2Crypto.RSA.load_pub_key_bio(pub_key_buffer)
buf = M2Crypto.BIO.MemoryBuffer(ssl_public_key)
rsa_key = M2Crypto.RSA.load_pub_key_bio(buf)
e, n = rsa_key.pub()

key_type = 'ssh-rsa'
@ -106,53 +137,134 @@ def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
return '%s %s %s@%s\n' % (key_type, b64_blob, name, suffix)


def generate_x509_cert(subject, bits=1024):
def revoke_cert(project_id, file_name):
"""Revoke a cert by file name"""
start = os.getcwd()
os.chdir(ca_folder(project_id))
# NOTE(vish): potential race condition here
utils.execute("openssl ca -config ./openssl.cnf -revoke '%s'" % file_name)
utils.execute("openssl ca -gencrl -config ./openssl.cnf -out '%s'" %
FLAGS.crl_file)
os.chdir(start)


def revoke_certs_by_user(user_id):
"""Revoke all user certs"""
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_user(admin, user_id):
revoke_cert(cert['project_id'], cert['file_name'])


def revoke_certs_by_project(project_id):
"""Revoke all project certs"""
# NOTE(vish): This is somewhat useless because we can just shut down
# the vpn.
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_project(admin, project_id):
revoke_cert(cert['project_id'], cert['file_name'])


def revoke_certs_by_user_and_project(user_id, project_id):
"""Revoke certs for user in project"""
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_user(admin, user_id, project_id):
revoke_cert(cert['project_id'], cert['file_name'])


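A usage sketch for the revocation helpers above (ids illustrative); each call shells out to openssl ca -revoke for every matching cert on record and then regenerates the CRL named by FLAGS.crl_file:

revoke_certs_by_user('user-1')
revoke_certs_by_user_and_project('user-1', 'proj-1')
revoke_certs_by_project('proj-1')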
def _project_cert_subject(project_id):
"""Helper to generate project cert subject"""
return FLAGS.project_cert_subject % (project_id, utils.isotime())


def _vpn_cert_subject(project_id):
"""Helper to generate vpn cert subject"""
return FLAGS.vpn_cert_subject % (project_id, utils.isotime())


def _user_cert_subject(user_id, project_id):
"""Helper to generate user cert subject"""
return FLAGS.user_cert_subject % (project_id, user_id, utils.isotime())


def generate_x509_cert(user_id, project_id, bits=1024):
"""Generate and sign a cert for user in project"""
subject = _user_cert_subject(user_id, project_id)
tmpdir = tempfile.mkdtemp()
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
logging.debug("openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating private key: %s",
"openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating CSR: %s",
"openssl req -new -key %s -out %s -batch -subj %s" %
utils.execute("openssl genrsa -out %s %s" % (keyfile, bits))
utils.execute("openssl req -new -key %s -out %s -batch -subj %s" %
(keyfile, csrfile, subject))
private_key = open(keyfile).read()
csr = open(csrfile).read()
shutil.rmtree(tmpdir)
return (private_key, csr)
(serial, signed_csr) = sign_csr(csr, project_id)
fname = os.path.join(ca_folder(project_id), "newcerts/%s.pem" % serial)
cert = {'user_id': user_id,
'project_id': project_id,
'file_name': fname}
db.certificate_create(context.get_admin_context(), cert)
return (private_key, signed_csr)


def sign_csr(csr_text, intermediate=None):
if not FLAGS.use_intermediate_ca:
intermediate = None
if not intermediate:
return _sign_csr(csr_text, FLAGS.ca_path)
user_ca = "%s/INTER/%s" % (FLAGS.ca_path, intermediate)
if not os.path.exists(user_ca):
def _ensure_project_folder(project_id):
if not os.path.exists(ca_path(project_id)):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
utils.runthis("Generating intermediate CA: %s",
"sh geninter.sh %s" % (intermediate))
os.chdir(ca_folder())
utils.execute("sh geninter.sh %s %s" %
(project_id, _project_cert_subject(project_id)))
os.chdir(start)
return _sign_csr(csr_text, user_ca)


def generate_vpn_files(project_id):
project_folder = ca_folder(project_id)
csr_fn = os.path.join(project_folder, "server.csr")
crt_fn = os.path.join(project_folder, "server.crt")

if os.path.exists(crt_fn):
return
_ensure_project_folder(project_id)
start = os.getcwd()
os.chdir(ca_folder())
# TODO(vish): the shell scripts could all be done in python
utils.execute("sh genvpn.sh %s %s" %
(project_id, _vpn_cert_subject(project_id)))
with open(csr_fn, "r") as csrfile:
csr_text = csrfile.read()
(serial, signed_csr) = sign_csr(csr_text, project_id)
with open(crt_fn, "w") as crtfile:
crtfile.write(signed_csr)
os.chdir(start)

def sign_csr(csr_text, project_id=None):
if not FLAGS.use_project_ca:
project_id = None
if not project_id:
return _sign_csr(csr_text, ca_folder())
_ensure_project_folder(project_id)
project_folder = ca_folder(project_id)
return _sign_csr(csr_text, ca_folder(project_id))


def _sign_csr(csr_text, ca_folder):
tmpfolder = tempfile.mkdtemp()
csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
inbound = os.path.join(tmpfolder, "inbound.csr")
outbound = os.path.join(tmpfolder, "outbound.csr")
csrfile = open(inbound, "w")
csrfile.write(csr_text)
csrfile.close()
logging.debug("Flags path: %s" % ca_folder)
logging.debug(_("Flags path: %s") % ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
utils.runthis("Signing cert: %s",
"openssl ca -batch -out %s/outbound.crt "
"-config ./openssl.cnf -infiles %s/inbound.csr" %
(tmpfolder, tmpfolder))
utils.execute("openssl ca -batch -out %s -config "
"./openssl.cnf -infiles %s" % (outbound, inbound))
out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound)
serial = out.rpartition("=")[2]
os.chdir(start)
with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile:
return crtfile.read()
with open(outbound, "r") as crtfile:
return (serial, crtfile.read())

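The serial is recovered from openssl's '-serial -noout' output, which prints a single line of the form 'serial=0A1B2C'. A sketch of the parse (a strip() is shown here for clarity; the code above stores the raw partition result):

out = 'serial=0A1B2C\n'
serial = out.rpartition('=')[2].strip()  # -> '0A1B2C'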
def mkreq(bits, subject="foo", ca=0):
@ -160,8 +272,7 @@ def mkreq(bits, subject="foo", ca=0):
req = M2Crypto.X509.Request()
rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
pk.assign_rsa(rsa)
# Should not be freed here
rsa = None
rsa = None  # should not be freed here
req.set_pubkey(pk)
req.set_subject(subject)
req.sign(pk, 'sha512')
@ -225,7 +336,6 @@ def mkcacert(subject='nova', years=1):
# IN THE SOFTWARE.
# http://code.google.com/p/boto


def compute_md5(fp):
"""
:type fp: file

@ -27,6 +27,9 @@ The underlying driver is loaded as a :class:`LazyPluggable`.

:sql_connection:  string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/nova/nova.sqlite`.

:enable_new_services:  when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""

from nova import exception
@ -37,6 +40,8 @@ from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('db_backend', 'sqlalchemy',
'The backend to use for db')
flags.DEFINE_boolean('enable_new_services', True,
'Services to be added to the available pool on create')


IMPL = utils.LazyPluggable(FLAGS['db_backend'],
@ -130,6 +135,45 @@ def service_update(context, service_id, values):
###################


def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
return IMPL.certificate_create(context, values)


def certificate_destroy(context, certificate_id):
"""Destroy the certificate or raise if it does not exist."""
return IMPL.certificate_destroy(context, certificate_id)


def certificate_get_all_by_project(context, project_id):
"""Get all certificates for a project."""
return IMPL.certificate_get_all_by_project(context, project_id)


def certificate_get_all_by_user(context, user_id):
"""Get all certificates for a user."""
return IMPL.certificate_get_all_by_user(context, user_id)


def certificate_get_all_by_user_and_project(context, user_id, project_id):
"""Get all certificates for a user and project."""
return IMPL.certificate_get_all_by_user_and_project(context,
user_id,
project_id)


def certificate_update(context, certificate_id, values):
"""Set the given properties on a certificate and update it.

Raises NotFound if the certificate does not exist.

"""
return IMPL.certificate_update(context, certificate_id, values)


###################


def floating_ip_allocate_address(context, host, project_id):
"""Allocate free floating ip and return the address.

@ -310,6 +354,11 @@ def instance_get_floating_address(context, instance_id):
return IMPL.instance_get_floating_address(context, instance_id)


def instance_get_project_vpn(context, project_id):
"""Get a vpn instance by project or return None."""
return IMPL.instance_get_project_vpn(context, project_id)


def instance_get_by_internal_id(context, internal_id):
"""Get an instance by internal id."""
return IMPL.instance_get_by_internal_id(context, internal_id)
@ -340,6 +389,16 @@ def instance_add_security_group(context, instance_id, security_group_id):
security_group_id)


def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
return IMPL.instance_action_create(context, values)


def instance_get_actions(context, instance_id):
"""Get instance actions by instance id."""
return IMPL.instance_get_actions(context, instance_id)


###################


@ -474,12 +533,14 @@ def network_update(context, network_id, values):
###################


def project_get_network(context, project_id):
def project_get_network(context, project_id, associate=True):
"""Return the network associated with the project.

Raises NotFound if no such network can be found.
If associate is true, it will attempt to associate a new
network if one is not found, otherwise it returns None.

"""

return IMPL.project_get_network(context, project_id, associate)

def project_get_network_v6(context, project_id):

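Everything in this module is a thin pass-through to the backend selected by LazyPluggable. A hedged sketch of the new certificate calls in use (field values illustrative):

from nova import context
from nova import db

admin = context.get_admin_context()
db.certificate_create(admin, {'user_id': 'user-1',
                              'project_id': 'proj-1',
                              'file_name': 'newcerts/01.pem'})
certs = db.certificate_get_all_by_user(admin, 'user-1')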
@ -19,6 +19,25 @@
"""
SQLAlchemy database backend
"""
import logging
import time

from sqlalchemy.exc import OperationalError

from nova import flags
from nova.db.sqlalchemy import models

models.register_models()

FLAGS = flags.FLAGS


for i in xrange(FLAGS.sql_max_retries):
if i > 0:
time.sleep(FLAGS.sql_retry_interval)

try:
models.register_models()
break
except OperationalError:
logging.exception(_("Data store is unreachable."
" Trying again in %d seconds.") % FLAGS.sql_retry_interval)

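One subtlety worth noting: the _() call at module import time assumes gettext has already installed _ as a builtin, as crypto.py above does explicitly. A sketch of that one-time setup:

import gettext
gettext.install('nova', unicode=1)  # makes _() a builtin (cf. crypto.py)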
@ -41,7 +41,7 @@ FLAGS = flags.FLAGS
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
warnings.warn('Use of empty request context is deprecated',
warnings.warn(_('Use of empty request context is deprecated'),
DeprecationWarning)
raise Exception('die')
return context.is_admin
@ -130,7 +130,7 @@ def service_get(context, service_id, session=None):
first()

if not result:
raise exception.NotFound('No service for id %s' % service_id)
raise exception.NotFound(_('No service for id %s') % service_id)

return result

@ -227,7 +227,7 @@ def service_get_by_args(context, host, binary):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.NotFound('No service for %s, %s' % (host, binary))
raise exception.NotFound(_('No service for %s, %s') % (host, binary))

return result

@ -236,6 +236,8 @@ def service_get_by_args(context, host, binary):
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not FLAGS.enable_new_services:
service_ref.disabled = True
service_ref.save()
return service_ref

@ -252,6 +254,84 @@ def service_update(context, service_id, values):
###################


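The query idiom used throughout this module, reduced to a sketch for reference (backslash continuations chain the SQLAlchemy filters, and a missing row becomes a NotFound):

result = session.query(models.Service).\
                 filter_by(id=service_id).\
                 filter_by(deleted=can_read_deleted(context)).\
                 first()
if not result:
    raise exception.NotFound(_('No service for id %s') % service_id)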
@require_admin_context
def certificate_get(context, certificate_id, session=None):
if not session:
session = get_session()

result = session.query(models.Certificate).\
filter_by(id=certificate_id).\
filter_by(deleted=can_read_deleted(context)).\
first()

if not result:
raise exception.NotFound(_('No certificate for id %s') % certificate_id)

return result


@require_admin_context
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save()
return certificate_ref


@require_admin_context
def certificate_destroy(context, certificate_id):
session = get_session()
with session.begin():
certificate_ref = certificate_get(context,
certificate_id,
session=session)
certificate_ref.delete(session=session)


@require_admin_context
def certificate_get_all_by_project(context, project_id):
session = get_session()
return session.query(models.Certificate).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
all()


@require_admin_context
def certificate_get_all_by_user(context, user_id):
session = get_session()
return session.query(models.Certificate).\
filter_by(user_id=user_id).\
filter_by(deleted=False).\
all()


@require_admin_context
def certificate_get_all_by_user_and_project(_context, user_id, project_id):
session = get_session()
return session.query(models.Certificate).\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
all()


@require_admin_context
def certificate_update(context, certificate_id, values):
session = get_session()
with session.begin():
certificate_ref = certificate_get(context,
certificate_id,
session=session)
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save(session=session)


###################


@require_context
def floating_ip_allocate_address(context, host, project_id):
authorize_project_context(context, project_id)
@ -385,6 +465,7 @@ def floating_ip_get_by_address(context, address, session=None):
session = get_session()

result = session.query(models.FloatingIp).\
options(joinedload_all('fixed_ip.network')).\
filter_by(address=address).\
filter_by(deleted=can_read_deleted(context)).\
first()
@ -491,7 +572,7 @@ def fixed_ip_get_by_address(context, address, session=None):
options(joinedload('instance')).\
first()
if not result:
raise exception.NotFound('No floating ip for address %s' % address)
raise exception.NotFound(_('No fixed ip for address %s') % address)

if is_user_context(context):
authorize_project_context(context, result.instance.project_id)
@ -538,6 +619,8 @@ def fixed_ip_update(context, address, values):
#TODO(gundlach): instance_create and volume_create are nearly identical
#and should be refactored.  I expect there are other copy-and-paste
#functions between the two of them as well.


@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
@ -589,19 +672,23 @@ def instance_get(context, instance_id, session=None):

if is_admin_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload('volumes')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload('volumes')).\
filter_by(project_id=context.project_id).\
filter_by(id=instance_id).\
filter_by(deleted=False).\
first()
if not result:
raise exception.NotFound('No instance for id %s' % instance_id)
raise exception.NotFound(_('No instance for id %s') % instance_id)

return result

@ -661,6 +748,18 @@ def instance_get_all_by_reservation(context, reservation_id):
all()


@require_admin_context
def instance_get_project_vpn(context, project_id):
session = get_session()
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
filter_by(project_id=project_id).\
filter_by(image_id=FLAGS.vpn_image_id).\
filter_by(deleted=can_read_deleted(context)).\
first()


@require_context
def instance_get_by_internal_id(context, internal_id):
session = get_session()
@ -679,7 +778,7 @@ def instance_get_by_internal_id(context, internal_id):
filter_by(deleted=False).\
first()
if not result:
raise exception.NotFound('Instance %s not found' % (internal_id))
raise exception.NotFound(_('Instance %s not found') % (internal_id))

return result

@ -766,6 +865,30 @@ def instance_add_security_group(context, instance_id, security_group_id):
instance_ref.save(session=session)


@require_context
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
action_ref = models.InstanceActions()
action_ref.update(values)

session = get_session()
with session.begin():
action_ref.save(session=session)
return action_ref


@require_admin_context
def instance_get_actions(context, instance_id):
"""Return the actions associated to the given instance id"""
session = get_session()
actions = {}
for action in session.query(models.InstanceActions).\
filter_by(instance_id=instance_id).\
all():
actions[action.action] = action.error
return actions


###################


@ -809,7 +932,7 @@ def key_pair_get(context, user_id, name, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.NotFound('no keypair for user %s, name %s' %
raise exception.NotFound(_('no keypair for user %s, name %s') %
(user_id, name))
return result

@ -924,7 +1047,7 @@ def network_get(context, network_id, session=None):
filter_by(deleted=False).\
first()
if not result:
raise exception.NotFound('No network for id %s' % network_id)
raise exception.NotFound(_('No network for id %s') % network_id)

return result

@ -932,6 +1055,8 @@ def network_get(context, network_id, session=None):
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
# pylint: disable-msg=C0103


@require_admin_context
def network_get_associated_fixed_ips(context, network_id):
session = get_session()
@ -952,7 +1077,7 @@ def network_get_by_bridge(context, bridge):
first()

if not result:
raise exception.NotFound('No network for bridge %s' % bridge)
raise exception.NotFound(_('No network for bridge %s') % bridge)
return result


@ -966,7 +1091,7 @@ def network_get_by_instance(_context, instance_id):
filter_by(deleted=False).\
first()
if not rv:
raise exception.NotFound('No network for instance %s' % instance_id)
raise exception.NotFound(_('No network for instance %s') % instance_id)
return rv


@ -980,7 +1105,7 @@ def network_set_host(context, network_id, host_id):
with_lockmode('update').\
first()
if not network_ref:
raise exception.NotFound('No network for id %s' % network_id)
raise exception.NotFound(_('No network for id %s') % network_id)

# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
@ -1004,24 +1129,26 @@ def network_update(context, network_id, values):


@require_context
def project_get_network(context, project_id):
def project_get_network(context, project_id, associate=True):
session = get_session()
rv = session.query(models.Network).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
first()
if not rv:
result = session.query(models.Network).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
first()
if not result:
if not associate:
return None
try:
return network_associate(context, project_id)
except IntegrityError:
# NOTE(vish): We hit this if there is a race and two
# processes are attempting to allocate the
# network at the same time
rv = session.query(models.Network).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
first()
return rv
result = session.query(models.Network).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
first()
return result

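The rewritten project_get_network() closes a create-or-get race: two workers can both miss the lookup and both try to associate a network; the loser catches IntegrityError and re-reads the winner's row. The caller's view, as a sketch:

net = project_get_network(ctxt, 'proj-1')                   # create-or-get
net = project_get_network(ctxt, 'proj-1', associate=False)  # get-or-None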
@require_context
def project_get_network_v6(context, project_id):
@ -1085,21 +1212,24 @@ def iscsi_target_create_safe(context, values):
###################


@require_admin_context
def auth_destroy_token(_context, token):
session = get_session()
session.delete(token)


@require_admin_context
def auth_get_token(_context, token_hash):
session = get_session()
tk = session.query(models.AuthToken).\
filter_by(token_hash=token_hash).\
first()
if not tk:
raise exception.NotFound('Token %s does not exist' % token_hash)
raise exception.NotFound(_('Token %s does not exist') % token_hash)
return tk


@require_admin_context
def auth_create_token(_context, token):
tk = models.AuthToken()
tk.update(token)
@ -1120,7 +1250,7 @@ def quota_get(context, project_id, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.NotFound('No quota for project_id %s' % project_id)
raise exception.NotFound(_('No quota for project_id %s') % project_id)

return result

@ -1275,7 +1405,7 @@ def volume_get(context, volume_id, session=None):
filter_by(deleted=False).\
first()
if not result:
raise exception.NotFound('No volume for id %s' % volume_id)
raise exception.NotFound(_('No volume for id %s') % volume_id)

return result

@ -1331,7 +1461,7 @@ def volume_get_by_ec2_id(context, ec2_id):
raise exception.NotAuthorized()

if not result:
raise exception.NotFound('Volume %s not found' % ec2_id)
raise exception.NotFound(_('Volume %s not found') % ec2_id)

return result

@ -1355,7 +1485,7 @@ def volume_get_instance(context, volume_id):
options(joinedload('instance')).\
first()
if not result:
raise exception.NotFound('Volume %s not found' % ec2_id)
raise exception.NotFound(_('Volume %s not found') % volume_id)

return result.instance

@ -1367,7 +1497,7 @@ def volume_get_shelf_and_blade(context, volume_id):
filter_by(volume_id=volume_id).\
first()
if not result:
raise exception.NotFound('No export device found for volume %s' %
raise exception.NotFound(_('No export device found for volume %s') %
volume_id)

return (result.shelf_id, result.blade_id)
@ -1380,7 +1510,7 @@ def volume_get_iscsi_target_num(context, volume_id):
filter_by(volume_id=volume_id).\
first()
if not result:
raise exception.NotFound('No target id found for volume %s' %
raise exception.NotFound(_('No target id found for volume %s') %
volume_id)

return result.target_num
@ -1425,7 +1555,7 @@ def security_group_get(context, security_group_id, session=None):
options(joinedload_all('rules')).\
first()
if not result:
raise exception.NotFound("No secuity group with id %s" %
raise exception.NotFound(_("No security group with id %s") %
security_group_id)
return result

@ -1442,7 +1572,7 @@ def security_group_get_by_name(context, project_id, group_name):
first()
if not result:
raise exception.NotFound(
'No security group named %s for project: %s' \
_('No security group named %s for project: %s')
% (group_name, project_id))
return result

@ -1530,7 +1660,7 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
filter_by(id=security_group_rule_id).\
first()
if not result:
raise exception.NotFound("No secuity group rule with id %s" %
raise exception.NotFound(_("No security group rule with id %s") %
security_group_rule_id)
return result

@ -1566,7 +1696,7 @@ def user_get(context, id, session=None):
first()

if not result:
raise exception.NotFound('No user for id %s' % id)
raise exception.NotFound(_('No user for id %s') % id)

return result

@ -1582,7 +1712,7 @@ def user_get_by_access_key(context, access_key, session=None):
first()

if not result:
raise exception.NotFound('No user for access key %s' % access_key)
raise exception.NotFound(_('No user for access key %s') % access_key)

return result

@ -1644,7 +1774,7 @@ def project_get(context, id, session=None):
first()

if not result:
raise exception.NotFound("No project with id %s" % id)
raise exception.NotFound(_("No project with id %s") % id)

return result

|
@ -151,6 +151,16 @@ class Service(BASE, NovaBase):
|
||||
disabled = Column(Boolean, default=False)
|
||||
|
||||
|
||||
class Certificate(BASE, NovaBase):
|
||||
"""Represents a an x509 certificate"""
|
||||
__tablename__ = 'certificates'
|
||||
id = Column(Integer, primary_key=True)
|
||||
|
||||
user_id = Column(String(255))
|
||||
project_id = Column(String(255))
|
||||
file_name = Column(String(255))
|
||||
|
||||
|
||||
class Instance(BASE, NovaBase):
|
||||
"""Represents a guest vm."""
|
||||
__tablename__ = 'instances'
|
||||
@ -210,6 +220,8 @@ class Instance(BASE, NovaBase):
|
||||
launched_at = Column(DateTime)
|
||||
terminated_at = Column(DateTime)
|
||||
|
||||
availability_zone = Column(String(255))
|
||||
|
||||
# User editable field for display in user-facing UIs
|
||||
display_name = Column(String(255))
|
||||
display_description = Column(String(255))
|
||||
@ -226,6 +238,16 @@ class Instance(BASE, NovaBase):
|
||||
# 'shutdown', 'shutoff', 'crashed'])
|
||||
|
||||
|
||||
class InstanceActions(BASE, NovaBase):
|
||||
"""Represents a guest VM's actions and results"""
|
||||
__tablename__ = "instance_actions"
|
||||
id = Column(Integer, primary_key=True)
|
||||
instance_id = Column(Integer, ForeignKey('instances.id'))
|
||||
|
||||
action = Column(String(255))
|
||||
error = Column(Text)
|
||||
|
||||
|
||||
class Volume(BASE, NovaBase):
|
||||
"""Represents a block storage device that can be attached to a vm."""
|
||||
__tablename__ = 'volumes'
|
||||
@ -421,7 +443,7 @@ class AuthToken(BASE, NovaBase):
|
||||
"""
|
||||
__tablename__ = 'auth_tokens'
|
||||
token_hash = Column(String(255), primary_key=True)
|
||||
user_id = Column(Integer)
|
||||
user_id = Column(String(255))
|
||||
server_manageent_url = Column(String(255))
|
||||
storage_url = Column(String(255))
|
||||
cdn_management_url = Column(String(255))
|
||||
@ -530,10 +552,11 @@ def register_models():
|
||||
it will never need to be called explicitly elsewhere.
|
||||
"""
|
||||
from sqlalchemy import create_engine
|
||||
models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp,
|
||||
FloatingIp, Network, SecurityGroup,
|
||||
SecurityGroupIngressRule, SecurityGroupInstanceAssociation,
|
||||
AuthToken, User, Project) # , Image, Host
|
||||
models = (Service, Instance, InstanceActions,
|
||||
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
|
||||
Network, SecurityGroup, SecurityGroupIngressRule,
|
||||
SecurityGroupInstanceAssociation, AuthToken, User,
|
||||
Project, Certificate) # , Image, Host
|
||||
engine = create_engine(FLAGS.sql_connection, echo=False)
|
||||
for model in models:
|
||||
model.metadata.create_all(engine)
|
||||
|
@ -36,7 +36,9 @@ def get_session(autocommit=True, expire_on_commit=False):
    global _MAKER
    if not _MAKER:
        if not _ENGINE:
            _ENGINE = create_engine(FLAGS.sql_connection, echo=False)
            _ENGINE = create_engine(FLAGS.sql_connection,
                                    pool_recycle=FLAGS.sql_idle_timeout,
                                    echo=False)
        _MAKER = (sessionmaker(bind=_ENGINE,
                               autocommit=autocommit,
                               expire_on_commit=expire_on_commit))
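SQLAlchemy's pool_recycle discards pooled connections older than the given number of seconds, so the service stops handing out connections that a database such as MySQL has already closed on its side. A standalone illustration (the URL and the 3600-second figure are placeholders, not values taken from this diff):

    # Standalone sketch of pool_recycle; the connection URL and timeout
    # below are placeholders for the FLAGS values.
    from sqlalchemy import create_engine

    engine = create_engine('mysql://nova:secret@localhost/nova',
                           pool_recycle=3600,  # drop connections idle > 1h
                           echo=False)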

@ -27,23 +27,26 @@ import traceback


class ProcessExecutionError(IOError):

    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        if description is None:
            description = "Unexpected error while running command."
            description = _("Unexpected error while running command.")
        if exit_code is None:
            exit_code = '-'
        message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % (
            description, cmd, exit_code, stdout, stderr)
        message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\
                  % (description, cmd, exit_code, stdout, stderr)
        IOError.__init__(self, message)


class Error(Exception):

    def __init__(self, message=None):
        super(Error, self).__init__(message)


class ApiError(Error):

    def __init__(self, message='Unknown', code='Unknown'):
        self.message = message
        self.code = code

@ -81,7 +84,7 @@ def wrap_exception(f):
        except Exception, e:
            if not isinstance(e, Error):
                #exc_type, exc_value, exc_traceback = sys.exc_info()
                logging.exception('Uncaught exception')
                logging.exception(_('Uncaught exception'))
                #logging.error(traceback.extract_stack(exc_traceback))
                raise Error(str(e))
            raise

59
nova/fakememcache.py
Normal file
@ -0,0 +1,59 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Super simple fake memcache client."""

import utils


class Client(object):
    """Replicates a tiny subset of memcached client interface."""

    def __init__(self, *args, **kwargs):
        """Ignores the passed in args"""
        self.cache = {}

    def get(self, key):
        """Retrieves the value for a key or None."""
        (timeout, value) = self.cache.get(key, (0, None))
        if timeout == 0 or utils.utcnow_ts() < timeout:
            return value
        return None

    def set(self, key, value, time=0, min_compress_len=0):
        """Sets the value for a key."""
        timeout = 0
        if time != 0:
            timeout = utils.utcnow_ts() + time
        self.cache[key] = (timeout, value)
        return True

    def add(self, key, value, time=0, min_compress_len=0):
        """Sets the value for a key if it doesn't exist."""
        if not self.get(key) is None:
            return False
        return self.set(key, value, time, min_compress_len)

    def incr(self, key, delta=1):
        """Increments the value for a key."""
        value = self.get(key)
        if value is None:
            return None
        new_value = int(value) + delta
        self.cache[key] = (self.cache[key][0], str(new_value))
        return new_value
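Taken together, the class mimics just enough of a memcached client to stand in during tests. A quick usage sketch (expected behavior per the code above):

    # Usage sketch of the fake client; expected results noted inline.
    client = Client()
    client.set('counter', '1', time=60)  # entry expires 60 seconds from now
    client.add('counter', '5')           # returns False: key already present
    client.incr('counter')               # returns 2 and stores '2'
    client.get('missing')                # returns None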
@ -25,6 +25,10 @@ from carrot.backends import base
from eventlet import greenthread


EXCHANGES = {}
QUEUES = {}


class Message(base.BaseMessage):
    pass

@ -37,12 +41,12 @@ class Exchange(object):
        self._routes = {}

    def publish(self, message, routing_key=None):
        logging.debug('(%s) publish (key: %s) %s',
        logging.debug(_('(%s) publish (key: %s) %s'),
                      self.name, routing_key, message)
        routing_key = routing_key.split('.')[0]
        if routing_key in self._routes:
            for f in self._routes[routing_key]:
                logging.debug('Publishing to route %s', f)
                logging.debug(_('Publishing to route %s'), f)
                f(message, routing_key=routing_key)

    def bind(self, callback, routing_key):

@ -68,81 +72,63 @@ class Queue(object):
        return self._queue.get()


class Backend(object):
    """ Singleton backend for testing """
    class __impl(base.BaseBackend):
        def __init__(self, *args, **kwargs):
            #super(__impl, self).__init__(*args, **kwargs)
            self._exchanges = {}
            self._queues = {}
class Backend(base.BaseBackend):
    def queue_declare(self, queue, **kwargs):
        global QUEUES
        if queue not in QUEUES:
            logging.debug(_('Declaring queue %s'), queue)
            QUEUES[queue] = Queue(queue)

        def _reset_all(self):
            self._exchanges = {}
            self._queues = {}
    def exchange_declare(self, exchange, type, *args, **kwargs):
        global EXCHANGES
        if exchange not in EXCHANGES:
            logging.debug(_('Declaring exchange %s'), exchange)
            EXCHANGES[exchange] = Exchange(exchange, type)

        def queue_declare(self, queue, **kwargs):
            if queue not in self._queues:
                logging.debug('Declaring queue %s', queue)
                self._queues[queue] = Queue(queue)
    def queue_bind(self, queue, exchange, routing_key, **kwargs):
        global EXCHANGES
        global QUEUES
        logging.debug(_('Binding %s to %s with key %s'),
                      queue, exchange, routing_key)
        EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)

        def exchange_declare(self, exchange, type, *args, **kwargs):
            if exchange not in self._exchanges:
                logging.debug('Declaring exchange %s', exchange)
                self._exchanges[exchange] = Exchange(exchange, type)
    def declare_consumer(self, queue, callback, *args, **kwargs):
        self.current_queue = queue
        self.current_callback = callback

        def queue_bind(self, queue, exchange, routing_key, **kwargs):
            logging.debug('Binding %s to %s with key %s',
                          queue, exchange, routing_key)
            self._exchanges[exchange].bind(self._queues[queue].push,
                                           routing_key)
    def consume(self, limit=None):
        while True:
            item = self.get(self.current_queue)
            if item:
                self.current_callback(item)
                raise StopIteration()
            greenthread.sleep(0)

        def declare_consumer(self, queue, callback, *args, **kwargs):
            self.current_queue = queue
            self.current_callback = callback
    def get(self, queue, no_ack=False):
        global QUEUES
        if not queue in QUEUES or not QUEUES[queue].size():
            return None
        (message_data, content_type, content_encoding) = QUEUES[queue].pop()
        message = Message(backend=self, body=message_data,
                          content_type=content_type,
                          content_encoding=content_encoding)
        message.result = True
        logging.debug(_('Getting from %s: %s'), queue, message)
        return message

        def consume(self, *args, **kwargs):
            while True:
                item = self.get(self.current_queue)
                if item:
                    self.current_callback(item)
                    raise StopIteration()
                greenthread.sleep(0)
    def prepare_message(self, message_data, delivery_mode,
                        content_type, content_encoding, **kwargs):
        """Prepare message for sending."""
        return (message_data, content_type, content_encoding)

        def get(self, queue, no_ack=False):
            if not queue in self._queues or not self._queues[queue].size():
                return None
            (message_data, content_type, content_encoding) = \
                self._queues[queue].pop()
            message = Message(backend=self, body=message_data,
                              content_type=content_type,
                              content_encoding=content_encoding)
            message.result = True
            logging.debug('Getting from %s: %s', queue, message)
            return message

        def prepare_message(self, message_data, delivery_mode,
                            content_type, content_encoding, **kwargs):
            """Prepare message for sending."""
            return (message_data, content_type, content_encoding)

        def publish(self, message, exchange, routing_key, **kwargs):
            if exchange in self._exchanges:
                self._exchanges[exchange].publish(
                    message, routing_key=routing_key)

    __instance = None

    def __init__(self, *args, **kwargs):
        if Backend.__instance is None:
            Backend.__instance = Backend.__impl(*args, **kwargs)
        self.__dict__['_Backend__instance'] = Backend.__instance

    def __getattr__(self, attr):
        return getattr(self.__instance, attr)

    def __setattr__(self, attr, value):
        return setattr(self.__instance, attr, value)
    def publish(self, message, exchange, routing_key, **kwargs):
        global EXCHANGES
        if exchange in EXCHANGES:
            EXCHANGES[exchange].publish(message, routing_key=routing_key)


def reset_all():
    Backend()._reset_all()
    global EXCHANGES
    global QUEUES
    EXCHANGES = {}
    QUEUES = {}
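With the broker state promoted to module-level globals, every Backend instance shares the same exchanges and queues, and a test run can flush them in one call. A hypothetical teardown (the TestCase itself is illustrative; only reset_all comes from this diff):

    # Hypothetical test teardown built on the module-level reset above.
    import unittest

    from nova import fakerabbit


    class RpcTestCase(unittest.TestCase):
        def tearDown(self):
            fakerabbit.reset_all()   # wipe the shared EXCHANGES and QUEUES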

@ -29,6 +29,8 @@ import sys

import gflags

from nova import utils


class FlagValues(gflags.FlagValues):
    """Extension of gflags.FlagValues that allows undefined and runtime flags.

@ -159,6 +161,7 @@ class StrWrapper(object):
            return str(val)
        raise KeyError(name)


FLAGS = FlagValues()
gflags.FLAGS = FLAGS
gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)

@ -183,6 +186,12 @@ DEFINE_list = _wrapper(gflags.DEFINE_list)
DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist)
DEFINE_multistring = _wrapper(gflags.DEFINE_multistring)
DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int)
DEFINE_flag = _wrapper(gflags.DEFINE_flag)


HelpFlag = gflags.HelpFlag
HelpshortFlag = gflags.HelpshortFlag
HelpXMLFlag = gflags.HelpXMLFlag


def DECLARE(name, module_string, flag_values=FLAGS):
@ -203,8 +212,11 @@ DEFINE_list('region_list',
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('glance_port', 9292, 'glance port')
DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('scheduler_topic', 'scheduler',
              'the topic scheduler nodes listen on')

@ -223,22 +235,25 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
              'Url to ec2 api server')
DEFINE_string('ec2_prefix', 'http', 'prefix for ec2')
DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server')
DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')

DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
              'default image to use, testing only')
DEFINE_string('default_kernel', 'aki-11111',
              'default kernel to use, testing only')
DEFINE_string('default_ramdisk', 'ari-11111',
              'default ramdisk to use, testing only')
DEFINE_string('default_instance_type', 'm1.small',
              'default instance type to use, testing only')
DEFINE_string('null_kernel', 'nokernel',
              'kernel image that indicates not to use a kernel,'
              ' but to use a raw disk image instead')

DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
DEFINE_string('vpn_image_id', 'ami-cloudpipe', 'AMI for cloudpipe vpn server')
DEFINE_string('vpn_key_suffix',
              '-key',
              'Suffix to add to project name for vpn key')
              '-vpn',
              'Suffix to add to project name for vpn key and secgroups')

DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')

@ -248,6 +263,11 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
DEFINE_string('sql_connection',
              'sqlite:///$state_path/nova.sqlite',
              'connection string for sql database')
DEFINE_string('sql_idle_timeout',
              '3600',
              'timeout for idle sql database connections')
DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')

DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
              'Manager for compute')

@ -24,6 +24,7 @@ import urlparse

import webob.exc

from nova.compute import api as compute_api
from nova import utils
from nova import flags
from nova import exception

@ -77,8 +78,8 @@ class ParallaxClient(object):
                data = json.loads(res.read())['images']
                return data
            else:
                logging.warn("Parallax returned HTTP error %d from "
                             "request for /images", res.status_int)
                logging.warn(_("Parallax returned HTTP error %d from "
                               "request for /images"), res.status_int)
                return []
        finally:
            c.close()

@ -96,8 +97,8 @@ class ParallaxClient(object):
                data = json.loads(res.read())['images']
                return data
            else:
                logging.warn("Parallax returned HTTP error %d from "
                             "request for /images/detail", res.status_int)
                logging.warn(_("Parallax returned HTTP error %d from "
                               "request for /images/detail"), res.status_int)
                return []
        finally:
            c.close()

@ -79,7 +79,8 @@ class S3ImageService(service.BaseImageService):
        result = self.index(context)
        result = [i for i in result if i['imageId'] == image_id]
        if not result:
            raise exception.NotFound('Image %s could not be found' % image_id)
            raise exception.NotFound(_('Image %s could not be found')
                                     % image_id)
        image = result[0]
        return image

@ -55,7 +55,6 @@ from nova import utils
from nova import flags
from nova.db import base

from twisted.internet import defer

FLAGS = flags.FLAGS

@ -67,10 +66,9 @@ class Manager(base.Base):
        self.host = host
        super(Manager, self).__init__(db_driver)

    @defer.inlineCallbacks
    def periodic_tasks(self, context=None):
        """Tasks to be run at a periodic interval"""
        yield
        pass

    def init_host(self):
        """Do any initialization that needs to be run if this is a standalone

@ -19,7 +19,6 @@ Implements vlans, bridges, and iptables rules using linux utilities.

import logging
import os
import signal

# TODO(ja): does the definition of network_path belong here?

@ -46,34 +45,82 @@ flags.DEFINE_string('vlan_interface', 'eth0',
                    'network device for vlans')
flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
                    'location of nova-dhcpbridge')
flags.DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server')
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
flags.DEFINE_string('routing_source_ip', '127.0.0.1',
flags.DEFINE_string('routing_source_ip', utils.get_my_ip(),
                    'Public IP of network host')
flags.DEFINE_bool('use_nova_chains', False,
                  'use the nova_ routing chains instead of default')


DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
flags.DEFINE_string('dns_server', None,
                    'if set, uses specific dns server for dnsmasq')
flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
                    'dmz range that should be accepted')

def metadata_forward():
    """Create forwarding rule for metadata"""
    _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 "
        "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT "
        "--to-destination %s:%s" % (FLAGS.cc_host, FLAGS.cc_port))
        "--to-destination %s:%s" % (FLAGS.cc_dmz, FLAGS.cc_port))


def init_host():
    """Basic networking setup goes here"""

    if FLAGS.use_nova_chains:
        _execute("sudo iptables -N nova_input", check_exit_code=False)
        _execute("sudo iptables -D %s -j nova_input" % FLAGS.input_chain,
                 check_exit_code=False)
        _execute("sudo iptables -A %s -j nova_input" % FLAGS.input_chain)

        _execute("sudo iptables -N nova_forward", check_exit_code=False)
        _execute("sudo iptables -D FORWARD -j nova_forward",
                 check_exit_code=False)
        _execute("sudo iptables -A FORWARD -j nova_forward")

        _execute("sudo iptables -N nova_output", check_exit_code=False)
        _execute("sudo iptables -D OUTPUT -j nova_output",
                 check_exit_code=False)
        _execute("sudo iptables -A OUTPUT -j nova_output")

        _execute("sudo iptables -t nat -N nova_prerouting",
                 check_exit_code=False)
        _execute("sudo iptables -t nat -D PREROUTING -j nova_prerouting",
                 check_exit_code=False)
        _execute("sudo iptables -t nat -A PREROUTING -j nova_prerouting")

        _execute("sudo iptables -t nat -N nova_postrouting",
                 check_exit_code=False)
        _execute("sudo iptables -t nat -D POSTROUTING -j nova_postrouting",
                 check_exit_code=False)
        _execute("sudo iptables -t nat -A POSTROUTING -j nova_postrouting")

        _execute("sudo iptables -t nat -N nova_snatting",
                 check_exit_code=False)
        _execute("sudo iptables -t nat -D POSTROUTING -j nova_snatting",
                 check_exit_code=False)
        _execute("sudo iptables -t nat -A POSTROUTING -j nova_snatting")

        _execute("sudo iptables -t nat -N nova_output", check_exit_code=False)
        _execute("sudo iptables -t nat -D OUTPUT -j nova_output",
                 check_exit_code=False)
        _execute("sudo iptables -t nat -A OUTPUT -j nova_output")
    else:
        # NOTE(vish): This makes it easy to ensure snatting rules always
        #             come after the accept rules in the postrouting chain
        _execute("sudo iptables -t nat -N SNATTING",
                 check_exit_code=False)
        _execute("sudo iptables -t nat -D POSTROUTING -j SNATTING",
                 check_exit_code=False)
        _execute("sudo iptables -t nat -A POSTROUTING -j SNATTING")

    # NOTE(devcamcar): Cloud public SNAT entries and the default
    # SNAT rule for outbound traffic.
    _confirm_rule("POSTROUTING", "-t nat -s %s "
    _confirm_rule("SNATTING", "-t nat -s %s "
        "-j SNAT --to-source %s"
        % (FLAGS.fixed_range, FLAGS.routing_source_ip))
        % (FLAGS.fixed_range, FLAGS.routing_source_ip), append=True)

    _confirm_rule("POSTROUTING", "-t nat -s %s -j MASQUERADE" %
                  FLAGS.fixed_range)
    _confirm_rule("POSTROUTING", "-t nat -s %s -d %s -j ACCEPT" %
                  (FLAGS.fixed_range, FLAGS.dmz_cidr))
    _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" %
                  {'range': FLAGS.fixed_range})
    if(FLAGS.use_ipv6):

@ -83,10 +130,11 @@ def init_host():
        '"echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"')

def bind_floating_ip(floating_ip):
def bind_floating_ip(floating_ip, check_exit_code=True):
    """Bind ip to public interface"""
    _execute("sudo ip addr add %s dev %s" % (floating_ip,
                                             FLAGS.public_interface))
                                             FLAGS.public_interface),
             check_exit_code=check_exit_code)
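The new check_exit_code parameter exists so that init_host() can re-bind floating IPs that may already be present; `ip addr add` fails on an address that is already bound, and that failure is now ignorable. A hypothetical call (the address is a placeholder):

    # Hypothetical: ignore the failure when the address is already bound,
    # as NetworkManager.init_host() does further down in this diff.
    bind_floating_ip('203.0.113.10', check_exit_code=False)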


def unbind_floating_ip(floating_ip):

@ -108,27 +156,16 @@ def ensure_floating_forward(floating_ip, fixed_ip):
    """Ensure floating ip forwarding rule"""
    _confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
                  % (floating_ip, fixed_ip))
    _confirm_rule("POSTROUTING", "-t nat -s %s -j SNAT --to %s"
    _confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
                  % (fixed_ip, floating_ip))
    # TODO(joshua): Get these from the secgroup datastore entries
    _confirm_rule("FORWARD", "-d %s -p icmp -j ACCEPT"
                  % (fixed_ip))
    for (protocol, port) in DEFAULT_PORTS:
        _confirm_rule("FORWARD", "-d %s -p %s --dport %s -j ACCEPT"
                      % (fixed_ip, protocol, port))


def remove_floating_forward(floating_ip, fixed_ip):
    """Remove forwarding for floating ip"""
    _remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
                 % (floating_ip, fixed_ip))
    _remove_rule("POSTROUTING", "-t nat -s %s -j SNAT --to %s"
    _remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
                 % (fixed_ip, floating_ip))
    _remove_rule("FORWARD", "-d %s -p icmp -j ACCEPT"
                 % (fixed_ip))
    for (protocol, port) in DEFAULT_PORTS:
        _remove_rule("FORWARD", "-d %s -p %s --dport %s -j ACCEPT"
                     % (fixed_ip, protocol, port))


def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):

@ -141,7 +178,7 @@ def ensure_vlan(vlan_num):
    """Create a vlan unless it already exists"""
    interface = "vlan%s" % vlan_num
    if not _device_exists(interface):
        logging.debug("Starting VLAN inteface %s", interface)
        logging.debug(_("Starting VLAN interface %s"), interface)
        _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
        _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
        _execute("sudo ifconfig %s up" % interface)

@ -151,7 +188,7 @@ def ensure_vlan(vlan_num):
def ensure_bridge(bridge, interface, net_attrs=None):
    """Create a bridge unless it already exists"""
    if not _device_exists(bridge):
        logging.debug("Starting Bridge interface for %s", interface)
        logging.debug(_("Starting Bridge interface for %s"), interface)
        _execute("sudo brctl addbr %s" % bridge)
        _execute("sudo brctl setfd %s 0" % bridge)
        # _execute("sudo brctl setageing %s 10" % bridge)

@ -170,6 +207,15 @@ def ensure_bridge(bridge, interface, net_attrs=None):
                                          net_attrs['cidr_v6']))
    else:
        _execute("sudo ifconfig %s up" % bridge)
    if FLAGS.use_nova_chains:
        (out, err) = _execute("sudo iptables -N nova_forward",
                              check_exit_code=False)
        if err != 'iptables: Chain already exists.\n':
            # NOTE(vish): chain didn't exist link chain
            _execute("sudo iptables -D FORWARD -j nova_forward",
                     check_exit_code=False)
            _execute("sudo iptables -A FORWARD -j nova_forward")

    _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
    _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)

@ -212,9 +258,9 @@ def update_dhcp(context, network_id):
            _execute('sudo kill -HUP %d' % pid)
            return
        except Exception as exc:  # pylint: disable-msg=W0703
            logging.debug("Hupping dnsmasq threw %s", exc)
            logging.debug(_("Hupping dnsmasq threw %s"), exc)
    else:
        logging.debug("Pid %d is stale, relaunching dnsmasq", pid)
        logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)

    # FLAGFILE and DNSMASQ_INTERFACE in env
    env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,

@ -290,13 +336,17 @@ def _device_exists(device):
    return not err


def _confirm_rule(chain, cmd):
def _confirm_rule(chain, cmd, append=False):
    """Delete and re-add iptables rule"""
    if FLAGS.use_nova_chains:
        chain = "nova_%s" % chain.lower()
    if append:
        loc = "-A"
    else:
        loc = "-I"
    _execute("sudo iptables --delete %s %s" % (chain, cmd),
             check_exit_code=False)
    _execute("sudo iptables -I %s %s" % (chain, cmd))
    _execute("sudo iptables %s %s %s" % (loc, chain, cmd))


def _remove_rule(chain, cmd):

@ -319,6 +369,8 @@ def _dnsmasq_cmd(net):
           ' --dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
           ' --dhcp-script=%s' % FLAGS.dhcpbridge,
           ' --leasefile-ro']
    if FLAGS.dns_server:
        cmd.append(' -h -R --server=%s' % FLAGS.dns_server)
    return ''.join(cmd)


@ -339,7 +391,7 @@ def _stop_dnsmasq(network):
    try:
        _execute('sudo kill -TERM %d' % pid)
    except Exception as exc:  # pylint: disable-msg=W0703
        logging.debug("Killing dnsmasq threw %s", exc)
        logging.debug(_("Killing dnsmasq threw %s"), exc)


def _dhcp_file(bridge, kind):

@ -47,9 +47,9 @@ topologies. All of the network commands are issued to a subclass of
import datetime
import logging
import math
import socket

import IPy
from twisted.internet import defer

from nova import context
from nova import db

@ -57,6 +57,7 @@ from nova import exception
from nova import flags
from nova import manager
from nova import utils
from nova import rpc


FLAGS = flags.FLAGS

@ -89,8 +90,13 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False,
                  'Whether to update dhcp when fixed_ip is disassociated')
flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600,
                     'Seconds after which a deallocated ip is disassociated')

flags.DEFINE_bool('use_ipv6', True,
                  'use the ipv6')
flags.DEFINE_string('network_host', socket.gethostname(),
                    'Network host to use for ip allocation in flat modes')
flags.DEFINE_bool('fake_call', False,
                  'If True, skip using the queue and make local calls')

class AddressAlreadyAllocated(exception.Error):

@ -116,10 +122,20 @@ class NetworkManager(manager.Manager):
        ctxt = context.get_admin_context()
        for network in self.db.host_get_networks(ctxt, self.host):
            self._on_set_network_host(ctxt, network['id'])
        floating_ips = self.db.floating_ip_get_all_by_host(ctxt,
                                                           self.host)
        for floating_ip in floating_ips:
            if floating_ip.get('fixed_ip', None):
                fixed_address = floating_ip['fixed_ip']['address']
                # NOTE(vish): The False here is because we ignore the case
                #             that the ip is already bound.
                self.driver.bind_floating_ip(floating_ip['address'], False)
                self.driver.ensure_floating_forward(floating_ip['address'],
                                                    fixed_address)

    def set_network_host(self, context, network_id):
        """Safely sets the host of the network."""
        logging.debug("setting network host")
        logging.debug(_("setting network host"))
        host = self.db.network_set_host(context,
                                        network_id,
                                        self.host)

@ -178,10 +194,10 @@ class NetworkManager(manager.Manager):
        fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
        instance_ref = fixed_ip_ref['instance']
        if not instance_ref:
            raise exception.Error("IP %s leased that isn't associated" %
            raise exception.Error(_("IP %s leased that isn't associated") %
                                  address)
        if instance_ref['mac_address'] != mac:
            raise exception.Error("IP %s leased to bad mac %s vs %s" %
            raise exception.Error(_("IP %s leased to bad mac %s vs %s") %
                                  (address, instance_ref['mac_address'], mac))
        now = datetime.datetime.utcnow()
        self.db.fixed_ip_update(context,

@ -189,7 +205,8 @@ class NetworkManager(manager.Manager):
                                {'leased': True,
                                 'updated_at': now})
        if not fixed_ip_ref['allocated']:
            logging.warn("IP %s leased that was already deallocated", address)
            logging.warn(_("IP %s leased that was already deallocated"),
                         address)

    def release_fixed_ip(self, context, mac, address):
        """Called by dhcp-bridge when ip is released."""

@ -197,13 +214,13 @@ class NetworkManager(manager.Manager):
        fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
        instance_ref = fixed_ip_ref['instance']
        if not instance_ref:
            raise exception.Error("IP %s released that isn't associated" %
            raise exception.Error(_("IP %s released that isn't associated") %
                                  address)
        if instance_ref['mac_address'] != mac:
            raise exception.Error("IP %s released from bad mac %s vs %s" %
            raise exception.Error(_("IP %s released from bad mac %s vs %s") %
                                  (address, instance_ref['mac_address'], mac))
        if not fixed_ip_ref['leased']:
            logging.warn("IP %s released that was not leased", address)
            logging.warn(_("IP %s released that was not leased"), address)
        self.db.fixed_ip_update(context,
                                fixed_ip_ref['address'],
                                {'leased': False})

@ -216,8 +233,8 @@ class NetworkManager(manager.Manager):
        network_ref = self.db.fixed_ip_get_network(context, address)
        self.driver.update_dhcp(context, network_ref['id'])

    def get_network(self, context):
        """Get the network for the current context."""
    def get_network_host(self, context):
        """Get the network host for the current context."""
        raise NotImplementedError()

    def create_networks(self, context, num_networks, network_size, cidr_v6,
@ -305,10 +322,6 @@ class FlatManager(NetworkManager):
        """Network is created manually."""
        pass

    def setup_fixed_ip(self, context, address):
        """Currently no setup."""
        pass

    def create_networks(self, context, cidr, num_networks, network_size,
                        cidr_v6, *args, **kwargs):
        """Create networks based on parameters."""

@ -337,14 +350,25 @@ class FlatManager(NetworkManager):
        if network_ref:
            self._create_fixed_ips(context, network_ref['id'])

    def get_network(self, context):
        """Get the network for the current context."""
        # NOTE(vish): To support mutilple network hosts, This could randomly
        #             select from multiple networks instead of just
        #             returning the one. It could also potentially be done
        #             in the scheduler.
        return self.db.network_get_by_bridge(context,
                                             FLAGS.flat_network_bridge)
    def get_network_host(self, context):
        """Get the network host for the current context."""
        network_ref = self.db.network_get_by_bridge(context,
                                                    FLAGS.flat_network_bridge)
        # NOTE(vish): If the network has no host, use the network_host flag.
        #             This could eventually be a db lookup of some sort, but
        #             a flag is easy to handle for now.
        host = network_ref['host']
        if not host:
            topic = self.db.queue_get_for(context,
                                          FLAGS.network_topic,
                                          FLAGS.network_host)
            if FLAGS.fake_call:
                return self.set_network_host(context, network_ref['id'])
            host = rpc.call(context,
                            FLAGS.network_topic,
                            {"method": "set_network_host",
                             "args": {"network_id": network_ref['id']}})
        return host

    def _on_set_network_host(self, context, network_id):
        """Called when this host becomes the host for a network."""
@ -373,13 +397,18 @@ class FlatDHCPManager(FlatManager):
        """Sets up matching network for compute hosts."""
        network_ref = db.network_get_by_instance(context, instance_id)
        self.driver.ensure_bridge(network_ref['bridge'],
                                  FLAGS.flat_interface,
                                  network_ref)
                                  FLAGS.flat_interface)

    def setup_fixed_ip(self, context, address):
    def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
        """Setup dhcp for this network."""
        address = super(FlatDHCPManager, self).allocate_fixed_ip(context,
                                                                 instance_id,
                                                                 *args,
                                                                 **kwargs)
        network_ref = db.fixed_ip_get_network(context, address)
        self.driver.update_dhcp(context, network_ref['id'])
        if not FLAGS.fake_network:
            self.driver.update_dhcp(context, network_ref['id'])
        return address

    def deallocate_fixed_ip(self, context, address, *args, **kwargs):
        """Returns a fixed ip to the pool."""

@ -410,10 +439,9 @@ class VlanManager(NetworkManager):
    instances in its subnet.
    """

    @defer.inlineCallbacks
    def periodic_tasks(self, context=None):
        """Tasks to be run at a periodic interval."""
        yield super(VlanManager, self).periodic_tasks(context)
        super(VlanManager, self).periodic_tasks(context)
        now = datetime.datetime.utcnow()
        timeout = FLAGS.fixed_ip_disassociate_timeout
        time = now - datetime.timedelta(seconds=timeout)

@ -421,7 +449,7 @@ class VlanManager(NetworkManager):
                                                           self.host,
                                                           time)
        if num:
            logging.debug("Dissassociated %s stale fixed ip(s)", num)
            logging.debug(_("Disassociated %s stale fixed ip(s)"), num)

    def init_host(self):
        """Do any initialization that needs to be run if this is a

@ -449,33 +477,20 @@ class VlanManager(NetworkManager):
                                           network_ref['id'],
                                           instance_id)
        self.db.fixed_ip_update(context, address, {'allocated': True})
        if not FLAGS.fake_network:
            self.driver.update_dhcp(context, network_ref['id'])
        return address

    def deallocate_fixed_ip(self, context, address, *args, **kwargs):
        """Returns a fixed ip to the pool."""
        self.db.fixed_ip_update(context, address, {'allocated': False})

    def setup_fixed_ip(self, context, address):
        """Sets forwarding rules and dhcp for fixed ip."""
        fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
        network_ref = self.db.fixed_ip_get_network(context, address)
        if self.db.instance_is_vpn(context, fixed_ip_ref['instance_id']):
            self.driver.ensure_vlan_forward(network_ref['vpn_public_address'],
                                            network_ref['vpn_public_port'],
                                            network_ref['vpn_private_address'])
        self.driver.update_dhcp(context, network_ref['id'])

    def setup_compute_network(self, context, instance_id):
        """Sets up matching network for compute hosts."""
        network_ref = db.network_get_by_instance(context, instance_id)
        self.driver.ensure_vlan_bridge(network_ref['vlan'],
                                       network_ref['bridge'])

    def restart_nets(self):
        """Ensure the network for each user is enabled."""
        # TODO(vish): Implement this
        pass

    def create_networks(self, context, cidr, num_networks, network_size,
                        vlan_start, vpn_start, cidr_v6):
        """Create networks based on parameters."""

@ -513,23 +528,48 @@ class VlanManager(NetworkManager):
        if network_ref:
            self._create_fixed_ips(context, network_ref['id'])

    def get_network(self, context):
    def get_network_host(self, context):
        """Get the network for the current context."""
        return self.db.project_get_network(context.elevated(),
                                           context.project_id)
        network_ref = self.db.project_get_network(context.elevated(),
                                                  context.project_id)
        # NOTE(vish): If the network has no host, do a call to get an
        #             available host. This should be changed to go through
        #             the scheduler at some point.
        host = network_ref['host']
        if not host:
            if FLAGS.fake_call:
                return self.set_network_host(context, network_ref['id'])
            host = rpc.call(context,
                            FLAGS.network_topic,
                            {"method": "set_network_host",
                             "args": {"network_id": network_ref['id']}})

        return host
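Both FlatManager and VlanManager now resolve a network's host lazily with the same shape: read the recorded host and, if none exists, either claim it locally (fake_call, used in tests) or ask over the queue. Condensed to its core (a sketch of the shared pattern above, not a third implementation):

    # Condensed sketch of the lazy host-assignment pattern shared by the
    # two get_network_host() implementations above.
    def _resolve_host(self, context, network_ref):
        host = network_ref['host']
        if not host:
            if FLAGS.fake_call:
                return self.set_network_host(context, network_ref['id'])
            host = rpc.call(context,
                            FLAGS.network_topic,
                            {"method": "set_network_host",
                             "args": {"network_id": network_ref['id']}})
        return host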

    def _on_set_network_host(self, context, network_id):
        """Called when this host becomes the host for a network."""
        network_ref = self.db.network_get(context, network_id)
        net = {}
        net['vpn_public_address'] = FLAGS.vpn_ip
        db.network_update(context, network_id, net)
        if not network_ref['vpn_public_address']:
            net = {}
            address = FLAGS.vpn_ip
            net['vpn_public_address'] = address
            db.network_update(context, network_id, net)
        else:
            address = network_ref['vpn_public_address']
        self.driver.ensure_vlan_bridge(network_ref['vlan'],
                                       network_ref['bridge'],
                                       network_ref)
        self.driver.update_dhcp(context, network_id)
        if(FLAGS.use_ipv6):
            self.driver.update_ra(context, network_id)

        # NOTE(vish): only ensure this forward if the address hasn't been set
        #             manually.
        if address == FLAGS.vpn_ip:
            self.driver.ensure_vlan_forward(FLAGS.vpn_ip,
                                            network_ref['vpn_public_port'],
                                            network_ref['vpn_private_address'])
        if not FLAGS.fake_network:
            self.driver.update_dhcp(context, network_id)
            if(FLAGS.use_ipv6):
                self.driver.update_ra(context, network_id)

    @property
    def _bottom_reserved_ips(self):

@ -102,7 +102,7 @@ def _render_parts(value, write_cb):
            _render_parts(subsubvalue, write_cb)
        write_cb('</' + utils.utf8(name) + '>')
    else:
        raise Exception("Unknown S3 value type %r", value)
        raise Exception(_("Unknown S3 value type %r"), value)


def get_argument(request, key, default_value):

@ -134,7 +134,7 @@ def get_context(request):
                                       check_type='s3')
        return context.RequestContext(user, project)
    except exception.Error as ex:
        logging.debug("Authentication Failure: %s", ex)
        logging.debug(_("Authentication Failure: %s"), ex)
        raise exception.NotAuthorized()


@ -227,7 +227,7 @@ class BucketResource(ErrorHandlingResource):

    def render_PUT(self, request):
        """Creates the bucket resource"""
        logging.debug("Creating bucket %s", self.name)
        logging.debug(_("Creating bucket %s"), self.name)
        logging.debug("calling bucket.Bucket.create(%r, %r)",
                      self.name,
                      request.context)

@ -237,7 +237,7 @@ class BucketResource(ErrorHandlingResource):

    def render_DELETE(self, request):
        """Deletes the bucket resource"""
        logging.debug("Deleting bucket %s", self.name)
        logging.debug(_("Deleting bucket %s"), self.name)
        bucket_object = bucket.Bucket(self.name)

        if not bucket_object.is_authorized(request.context):
@ -261,7 +261,9 @@ class ObjectResource(ErrorHandlingResource):
        Raises NotAuthorized if user in request context is not
        authorized to delete the object.
        """
        logging.debug("Getting object: %s / %s", self.bucket.name, self.name)
        logging.debug(_("Getting object: %s / %s"),
                      self.bucket.name,
                      self.name)

        if not self.bucket.is_authorized(request.context):
            raise exception.NotAuthorized()

@ -279,7 +281,9 @@ class ObjectResource(ErrorHandlingResource):
        Raises NotAuthorized if user in request context is not
        authorized to delete the object.
        """
        logging.debug("Putting object: %s / %s", self.bucket.name, self.name)
        logging.debug(_("Putting object: %s / %s"),
                      self.bucket.name,
                      self.name)

        if not self.bucket.is_authorized(request.context):
            raise exception.NotAuthorized()

@ -298,7 +302,7 @@ class ObjectResource(ErrorHandlingResource):
        authorized to delete the object.
        """

        logging.debug("Deleting object: %s / %s",
        logging.debug(_("Deleting object: %s / %s"),
                      self.bucket.name,
                      self.name)

@ -394,17 +398,17 @@ class ImagesResource(resource.Resource):
        image_id = get_argument(request, 'image_id', u'')
        image_object = image.Image(image_id)
        if not image_object.is_authorized(request.context):
            logging.debug("not authorized for render_POST in images")
            logging.debug(_("not authorized for render_POST in images"))
            raise exception.NotAuthorized()

        operation = get_argument(request, 'operation', u'')
        if operation:
            # operation implies publicity toggle
            logging.debug("handling publicity toggle")
            logging.debug(_("handling publicity toggle"))
            image_object.set_public(operation == 'add')
        else:
            # other attributes imply update
            logging.debug("update user fields")
            logging.debug(_("update user fields"))
            clean_args = {}
            for arg in request.args.keys():
                clean_args[arg] = request.args[arg][0]

@ -267,6 +267,7 @@ class Image(object):
        if err:
            raise exception.Error("Failed to decrypt initialization "
                                  "vector: %s" % err)

        _out, err = utils.execute(
            'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
            % (encrypted_filename, key, iv, decrypted_filename),
209
nova/process.py
@ -1,209 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 FathomDB Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Process pool using twisted threading
"""

import logging
import StringIO

from twisted.internet import defer
from twisted.internet import error
from twisted.internet import protocol
from twisted.internet import reactor

from nova import flags
from nova.exception import ProcessExecutionError

FLAGS = flags.FLAGS
flags.DEFINE_integer('process_pool_size', 4,
                     'Number of processes to use in the process pool')


# This is based on _BackRelay from twister.internal.utils, but modified to
# capture both stdout and stderr, without odd stderr handling, and also to
# handle stdin
class BackRelayWithInput(protocol.ProcessProtocol):
    """
    Trivial protocol for communicating with a process and turning its output
    into the result of a L{Deferred}.

    @ivar deferred: A L{Deferred} which will be called back with all of stdout
        and all of stderr as well (as a tuple). C{terminate_on_stderr} is true
        and any bytes are received over stderr, this will fire with an
        L{_ProcessExecutionError} instance and the attribute will be set to
        C{None}.

    @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are
        received over stderr, this attribute will refer to a L{Deferred} which
        will be called back when the process ends. This C{Deferred} is also
        associated with the L{_ProcessExecutionError} which C{deferred} fires
        with earlier in this case so that users can determine when the process
        has actually ended, in addition to knowing when bytes have been
        received via stderr.
    """

    def __init__(self, deferred, cmd, started_deferred=None,
                 terminate_on_stderr=False, check_exit_code=True,
                 process_input=None):
        self.deferred = deferred
        self.cmd = cmd
        self.stdout = StringIO.StringIO()
        self.stderr = StringIO.StringIO()
        self.started_deferred = started_deferred
        self.terminate_on_stderr = terminate_on_stderr
        self.check_exit_code = check_exit_code
        self.process_input = process_input
        self.on_process_ended = None

    def _build_execution_error(self, exit_code=None):
        return ProcessExecutionError(cmd=self.cmd,
                                     exit_code=exit_code,
                                     stdout=self.stdout.getvalue(),
                                     stderr=self.stderr.getvalue())

    def errReceived(self, text):
        self.stderr.write(text)
        if self.terminate_on_stderr and (self.deferred is not None):
            self.on_process_ended = defer.Deferred()
            self.deferred.errback(self._build_execution_error())
            self.deferred = None
            self.transport.loseConnection()

    def outReceived(self, text):
        self.stdout.write(text)

    def processEnded(self, reason):
        if self.deferred is not None:
            stdout, stderr = self.stdout.getvalue(), self.stderr.getvalue()
            exit_code = reason.value.exitCode
            if self.check_exit_code and exit_code != 0:
                self.deferred.errback(self._build_execution_error(exit_code))
            else:
                try:
                    if self.check_exit_code:
                        reason.trap(error.ProcessDone)
                    self.deferred.callback((stdout, stderr))
                except:
                    # NOTE(justinsb): This logic is a little suspicious to me.
                    # If the callback throws an exception, then errback will
                    # be called also. However, this is what the unit tests
                    # test for.
                    exec_error = self._build_execution_error(exit_code)
                    self.deferred.errback(exec_error)
        elif self.on_process_ended is not None:
            self.on_process_ended.errback(reason)

    def connectionMade(self):
        if self.started_deferred:
            self.started_deferred.callback(self)
        if self.process_input:
            self.transport.write(str(self.process_input))
        self.transport.closeStdin()
def get_process_output(executable, args=None, env=None, path=None,
                       process_reactor=None, check_exit_code=True,
                       process_input=None, started_deferred=None,
                       terminate_on_stderr=False):
    if process_reactor is None:
        process_reactor = reactor
    args = args and args or ()
    env = env and env and {}
    deferred = defer.Deferred()
    cmd = executable
    if args:
        cmd = " ".join([cmd] + args)
    logging.debug("Running cmd: %s", cmd)
    process_handler = BackRelayWithInput(
        deferred,
        cmd,
        started_deferred=started_deferred,
        check_exit_code=check_exit_code,
        process_input=process_input,
        terminate_on_stderr=terminate_on_stderr)
    # NOTE(vish): commands come in as unicode, but self.executes needs
    #             strings or process.spawn raises a deprecation warning
    executable = str(executable)
    if not args is None:
        args = [str(x) for x in args]
    process_reactor.spawnProcess(process_handler, executable,
                                 (executable,) + tuple(args), env, path)
    return deferred


class ProcessPool(object):
    """ A simple process pool implementation using Twisted's Process bits.

    This is pretty basic right now, but hopefully the API will be the correct
    one so that it can be optimized later.
    """
    def __init__(self, size=None):
        self.size = size and size or FLAGS.process_pool_size
        self._pool = defer.DeferredSemaphore(self.size)

    def simple_execute(self, cmd, **kw):
        """ Weak emulation of the old utils.execute() function.

        This only exists as a way to quickly move old execute methods to
        this new style of code.

        NOTE(termie): This will break on args with spaces in them.
        """
        parsed = cmd.split(' ')
        executable, args = parsed[0], parsed[1:]
        return self.execute(executable, args, **kw)

    def execute(self, *args, **kw):
        deferred = self._pool.acquire()

        def _associate_process(proto):
            deferred.process = proto.transport
            return proto.transport

        started = defer.Deferred()
        started.addCallback(_associate_process)
        kw.setdefault('started_deferred', started)

        deferred.process = None
        deferred.started = started

        deferred.addCallback(lambda _: get_process_output(*args, **kw))
        deferred.addBoth(self._release)
        return deferred

    def _release(self, retval=None):
        self._pool.release()
        return retval


class SharedPool(object):
    _instance = None

    def __init__(self):
        if SharedPool._instance is None:
            self.__class__._instance = ProcessPool()

    def __getattr__(self, key):
        return getattr(self._instance, key)


def simple_execute(cmd, **kwargs):
    return SharedPool().simple_execute(cmd, **kwargs)
119
nova/rpc.py
@ -25,18 +25,18 @@ import json

import logging
import sys
import time
import traceback
import uuid

from carrot import connection as carrot_connection
from carrot import messaging
from eventlet import greenthread
from twisted.internet import defer
from twisted.internet import task

from nova import context
from nova import exception
from nova import fakerabbit
from nova import flags
from nova import context
from nova import utils


FLAGS = flags.FLAGS

@ -91,15 +91,15 @@ class Consumer(messaging.Consumer):
                self.failed_connection = False
                break
            except:  # Catching all because carrot sucks
                logging.exception("AMQP server on %s:%d is unreachable." \
                    " Trying again in %d seconds." % (
                logging.exception(_("AMQP server on %s:%d is unreachable."
                    " Trying again in %d seconds.") % (
                        FLAGS.rabbit_host,
                        FLAGS.rabbit_port,
                        FLAGS.rabbit_retry_interval))
                self.failed_connection = True
        if self.failed_connection:
            logging.exception("Unable to connect to AMQP server" \
                " after %d tries. Shutting down." % FLAGS.rabbit_max_retries)
            logging.exception(_("Unable to connect to AMQP server"
                " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries)
            sys.exit(1)

    def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@ -116,29 +116,21 @@ class Consumer(messaging.Consumer):
            self.declare()
            super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
            if self.failed_connection:
                logging.error("Reconnected to queue")
                logging.error(_("Reconnected to queue"))
                self.failed_connection = False
        # NOTE(vish): This is catching all errors because we really don't
        #             want exceptions to be logged 10 times a second if some
        #             persistent failure occurs.
        except Exception:  # pylint: disable-msg=W0703
            if not self.failed_connection:
                logging.exception("Failed to fetch message from queue")
                logging.exception(_("Failed to fetch message from queue"))
                self.failed_connection = True

    def attach_to_eventlet(self):
        """Only needed for unit tests!"""
        def fetch_repeatedly():
            while True:
                self.fetch(enable_callbacks=True)
                greenthread.sleep(0.1)
        greenthread.spawn(fetch_repeatedly)

    def attach_to_twisted(self):
        """Attach a callback to twisted that fires 10 times a second"""
        loop = task.LoopingCall(self.fetch, enable_callbacks=True)
        loop.start(interval=0.1)
        return loop
        timer = utils.LoopingCall(self.fetch, enable_callbacks=True)
        timer.start(0.1)
        return timer


class Publisher(messaging.Publisher):

@ -161,7 +153,7 @@ class TopicConsumer(Consumer):
class AdapterConsumer(TopicConsumer):
    """Calls methods on a proxy object based on method and args"""
    def __init__(self, connection=None, topic="broadcast", proxy=None):
        LOG.debug('Initing the Adapter Consumer for %s' % (topic))
        LOG.debug(_('Initing the Adapter Consumer for %s') % (topic))
        self.proxy = proxy
        super(AdapterConsumer, self).__init__(connection=connection,
                                              topic=topic)

@ -176,7 +168,7 @@ class AdapterConsumer(TopicConsumer):

        Example: {'method': 'echo', 'args': {'value': 42}}
        """
        LOG.debug('received %s' % (message_data))
        LOG.debug(_('received %s') % (message_data))
        msg_id = message_data.pop('_msg_id', None)

        ctxt = _unpack_context(message_data)

@ -189,18 +181,20 @@ class AdapterConsumer(TopicConsumer):
            # messages stay in the queue indefinitely, so for now
            # we just log the message and send an error string
            # back to the caller
            LOG.warn('no method for message: %s' % (message_data))
            msg_reply(msg_id, 'No method for message: %s' % message_data)
            LOG.warn(_('no method for message: %s') % (message_data))
            msg_reply(msg_id, _('No method for message: %s') % message_data)
            return

        node_func = getattr(self.proxy, str(method))
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
        # pylint: disable-msg=W0142
        d = defer.maybeDeferred(node_func, context=ctxt, **node_args)
        if msg_id:
            d.addCallback(lambda rval: msg_reply(msg_id, rval, None))
            d.addErrback(lambda e: msg_reply(msg_id, None, e))
        try:
            rval = node_func(context=ctxt, **node_args)
            if msg_id:
                msg_reply(msg_id, rval, None)
        except Exception as e:
            if msg_id:
                msg_reply(msg_id, None, sys.exc_info())
        return
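Dispatch is now a plain synchronous call instead of a Twisted maybeDeferred chain; under eventlet each consumer already runs in its own green thread, so blocking here only blocks that green thread, and failures travel back as a sys.exc_info() triple. Reduced to its essentials (a sketch of the pattern above, with a hypothetical proxy object):

    # Sketch of the new dispatch contract; 'proxy', 'method' and the
    # message fields are hypothetical stand-ins.
    import sys

    def dispatch(proxy, method, ctxt, node_args, msg_id=None):
        node_func = getattr(proxy, str(method))
        try:
            rval = node_func(context=ctxt, **node_args)
            if msg_id:
                msg_reply(msg_id, rval, None)
        except Exception:
            if msg_id:
                msg_reply(msg_id, None, sys.exc_info())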


@ -242,14 +236,16 @@ class DirectPublisher(Publisher):
def msg_reply(msg_id, reply=None, failure=None):
    """Sends a reply or an error on the channel signified by msg_id

    failure should be a twisted failure object"""
    failure should be a sys.exc_info() tuple.

    """
    if failure:
        message = failure.getErrorMessage()
        traceback = failure.getTraceback()
        logging.error("Returning exception %s to caller", message)
        logging.error(traceback)
        failure = (failure.type.__name__, str(failure.value), traceback)
    conn = Connection.instance()
        message = str(failure[1])
        tb = traceback.format_exception(*failure)
        logging.error(_("Returning exception %s to caller"), message)
        logging.error(tb)
        failure = (failure[0].__name__, str(failure[1]), tb)
    conn = Connection.instance(True)
    publisher = DirectPublisher(connection=conn, msg_id=msg_id)
    try:
        publisher.send({'result': reply, 'failure': failure})
|
||||
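Under the new contract, msg_reply serializes the exception itself: class name, message, and a traceback rendered with the stdlib. That packing step in isolation (runnable as-is):

import sys
import traceback

try:
    raise ValueError("boom")
except Exception:
    exc_info = sys.exc_info()

# The same (name, message, traceback) triple msg_reply puts on the wire.
packed = (exc_info[0].__name__,
          str(exc_info[1]),
          traceback.format_exception(*exc_info))
print(packed[0])  # ValueError
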
@@ -287,7 +283,7 @@ def _unpack_context(msg):
        if key.startswith('_context_'):
            value = msg.pop(key)
            context_dict[key[9:]] = value
    LOG.debug('unpacked context: %s', context_dict)
    LOG.debug(_('unpacked context: %s'), context_dict)
    return context.RequestContext.from_dict(context_dict)


@@ -306,14 +302,13 @@ def _pack_context(msg, context):

def call(context, topic, msg):
    """Sends a message on a topic and waits for a response"""
    LOG.debug("Making asynchronous call...")
    LOG.debug(_("Making asynchronous call..."))
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug("MSG_ID is %s" % (msg_id))
    LOG.debug(_("MSG_ID is %s") % (msg_id))
    _pack_context(msg, context)

    class WaitMessage(object):

        def __call__(self, data, message):
            """Acks message and sets result."""
            message.ack()
@@ -337,41 +332,15 @@ def call(context, topic, msg):
        except StopIteration:
            pass
        consumer.close()
        # NOTE(termie): this is a little bit of a change from the original
        #               non-eventlet code where returning a Failure
        #               instance from a deferred call is very similar to
        #               raising an exception
        if isinstance(wait_msg.result, Exception):
            raise wait_msg.result
        return wait_msg.result


def call_twisted(context, topic, msg):
    """Sends a message on a topic and waits for a response"""
    LOG.debug("Making asynchronous call...")
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug("MSG_ID is %s" % (msg_id))
    _pack_context(msg, context)

    conn = Connection.instance()
    d = defer.Deferred()
    consumer = DirectConsumer(connection=conn, msg_id=msg_id)

    def deferred_receive(data, message):
        """Acks message and callbacks or errbacks"""
        message.ack()
        if data['failure']:
            return d.errback(RemoteError(*data['failure']))
        else:
            return d.callback(data['result'])

    consumer.register_callback(deferred_receive)
    injected = consumer.attach_to_twisted()

    # clean up after the injected listener and return x
    d.addCallback(lambda x: injected.stop() and x or x)

    publisher = TopicPublisher(connection=conn, topic=topic)
    publisher.send(msg)
    publisher.close()
    return d

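Both variants correlate request and reply through a one-off direct queue named by a random _msg_id, and both surface a remote failure as a local exception. The round trip, reduced to a runnable toy with a dict standing in for the AMQP direct exchange (illustrative only):

import uuid

replies = {}  # msg_id -> reply payload; a toy stand-in for direct queues

def toy_consumer(msg):
    # Server side: publish the result on the queue named by _msg_id.
    msg_id = msg.pop('_msg_id')
    replies[msg_id] = {'result': msg['args']['value'], 'failure': None}

def toy_call(msg):
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    toy_consumer(msg)            # in nova this hop goes through the broker
    data = replies.pop(msg_id)
    if data['failure']:
        raise Exception(*data['failure'])   # stand-in for RemoteError
    return data['result']

print(toy_call({'method': 'echo', 'args': {'value': 42}}))  # 42
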
def cast(context, topic, msg):
    """Sends a message on a topic without waiting for a response"""
    LOG.debug("Making asynchronous cast...")
@@ -384,7 +353,7 @@ def cast(context, topic, msg):

def generic_response(message_data, message):
    """Logs a result and exits"""
    LOG.debug('response %s', message_data)
    LOG.debug(_('response %s'), message_data)
    message.ack()
    sys.exit(0)

@@ -393,8 +362,8 @@ def send_message(topic, message, wait=True):
    """Sends a message for testing"""
    msg_id = uuid.uuid4().hex
    message.update({'_msg_id': msg_id})
    LOG.debug('topic is %s', topic)
    LOG.debug('message %s', message)
    LOG.debug(_('topic is %s'), topic)
    LOG.debug(_('message %s'), message)

    if wait:
        consumer = messaging.Consumer(connection=Connection.instance(),
@@ -34,5 +34,5 @@ class ChanceScheduler(driver.Scheduler):

        hosts = self.hosts_up(context, topic)
        if not hosts:
            raise driver.NoValidHost("No hosts found")
            raise driver.NoValidHost(_("No hosts found"))
        return hosts[int(random.random() * len(hosts))]
@@ -37,6 +37,11 @@ class NoValidHost(exception.Error):
    pass


class WillNotSchedule(exception.Error):
    """The specified host is not up or doesn't exist."""
    pass


class Scheduler(object):
    """The base class that all Scheduler classes should inherit from."""

@@ -58,4 +63,4 @@ class Scheduler(object):

    def schedule(self, context, topic, *_args, **_kwargs):
        """Must override at least this method for scheduler to work."""
        raise NotImplementedError("Must implement a fallback schedule")
        raise NotImplementedError(_("Must implement a fallback schedule"))
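With WillNotSchedule and NoValidHost as the two failure modes and schedule() as the single required override, the smallest useful driver looks roughly like this (a self-contained sketch; RandomScheduler and the canned host list are not part of nova):

import random

class NoValidHost(Exception):
    pass

class Scheduler(object):
    def hosts_up(self, context, topic):
        return ['host1', 'host2', 'host3']  # canned data for the sketch

    def schedule(self, context, topic, *args, **kwargs):
        raise NotImplementedError("Must implement a fallback schedule")

class RandomScheduler(Scheduler):
    """Same spirit as the ChanceScheduler hunk shown earlier."""
    def schedule(self, context, topic, *args, **kwargs):
        hosts = self.hosts_up(context, topic)
        if not hosts:
            raise NoValidHost("No hosts found")
        return hosts[int(random.random() * len(hosts))]

print(RandomScheduler().schedule(None, 'compute'))
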
@@ -65,4 +65,4 @@ class SchedulerManager(manager.Manager):
                 db.queue_get_for(context, topic, host),
                 {"method": method,
                  "args": kwargs})
        logging.debug("Casting to %s %s for %s", topic, host, method)
        logging.debug(_("Casting to %s %s for %s"), topic, host, method)
@@ -43,11 +43,24 @@ class SimpleScheduler(chance.ChanceScheduler):
    def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        instance_ref = db.instance_get(context, instance_id)
        if instance_ref['availability_zone'] and context.is_admin:
            zone, _x, host = instance_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-compute')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule("Host %s is not alive" % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.instance_update(context, instance_id, {'host': host,
                                                      'scheduled_at': now})
            return host
        results = db.service_get_all_compute_sorted(context)
        for result in results:
            (service, instance_cores) = result
            if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
                raise driver.NoValidHost("All hosts have too many cores")
                raise driver.NoValidHost(_("All hosts have too many cores"))
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
@@ -57,16 +70,30 @@ class SimpleScheduler(chance.ChanceScheduler):
                                   {'host': service['host'],
                                    'scheduled_at': now})
                return service['host']
        raise driver.NoValidHost("No hosts found")
        raise driver.NoValidHost(_("No hosts found"))

    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
        if (':' in volume_ref['availability_zone']) and context.is_admin:
            zone, _x, host = volume_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule("Host %s not available" % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host
        results = db.service_get_all_volume_sorted(context)
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                raise driver.NoValidHost("All hosts have too many gigabytes")
                raise driver.NoValidHost(_("All hosts have too many "
                                           "gigabytes"))
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
@@ -76,7 +103,7 @@ class SimpleScheduler(chance.ChanceScheduler):
                                  {'host': service['host'],
                                   'scheduled_at': now})
                return service['host']
        raise driver.NoValidHost("No hosts found")
        raise driver.NoValidHost(_("No hosts found"))

    def schedule_set_network_host(self, context, *_args, **_kwargs):
        """Picks a host that is up and has the fewest networks."""
@@ -85,7 +112,7 @@ class SimpleScheduler(chance.ChanceScheduler):
        for result in results:
            (service, instance_count) = result
            if instance_count >= FLAGS.max_networks:
                raise driver.NoValidHost("All hosts have too many networks")
                raise driver.NoValidHost(_("All hosts have too many networks"))
            if self.service_is_up(service):
                return service['host']
        raise driver.NoValidHost("No hosts found")
        raise driver.NoValidHost(_("No hosts found"))
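Each of these methods walks a list sorted by load and takes the first live host under the resource cap; because the list is ascending, hitting the cap on the first candidate means no host can fit. That selection policy in isolation (a runnable sketch with canned data; MAX_CORES stands in for FLAGS.max_cores):

MAX_CORES = 16

class NoValidHost(Exception):
    pass

def pick_compute_host(sorted_results, requested_vcpus, service_is_up):
    """sorted_results: (host, cores_in_use) pairs, least loaded first."""
    for host, cores in sorted_results:
        # Sorted ascending: if the least-loaded host is over the cap,
        # every host is.
        if cores + requested_vcpus > MAX_CORES:
            raise NoValidHost("All hosts have too many cores")
        if service_is_up(host):
            return host
    raise NoValidHost("No hosts found")

hosts = [('node1', 2), ('node2', 6), ('node3', 10)]
print(pick_compute_host(hosts, 4, lambda host: True))  # node1
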
151 nova/server.py
@@ -1,151 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Base functionality for nova daemons - gradually being replaced with twistd.py.
"""

import daemon
from daemon import pidlockfile
import logging
import logging.handlers
import os
import signal
import sys
import time

from nova import flags


FLAGS = flags.FLAGS
flags.DEFINE_bool('daemonize', False, 'daemonize this process')
# NOTE(termie): right now I am defaulting to using syslog when we daemonize
#               it may be better to do something else -shrug-
# NOTE(Devin): I think we should let each process have its own log file
#              and put it in /var/logs/nova/(appname).log
#              This makes debugging much easier and cuts down on sys log
#              clutter.
flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing')
flags.DEFINE_string('logfile', None, 'log file to output to')
flags.DEFINE_string('logdir', None, 'directory to keep log files in '
                    '(will be prepended to $logfile)')
flags.DEFINE_string('pidfile', None, 'pid file to output to')
flags.DEFINE_string('working_directory', './', 'working directory...')
flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run')
flags.DEFINE_integer('gid', os.getgid(), 'gid under which to run')


def stop(pidfile):
    """
    Stop the daemon
    """
    # Get the pid from the pidfile
    try:
        pid = int(open(pidfile, 'r').read().strip())
    except IOError:
        message = "pidfile %s does not exist. Daemon not running?\n"
        sys.stderr.write(message % pidfile)
        return

    # Try killing the daemon process
    try:
        while 1:
            os.kill(pid, signal.SIGTERM)
            time.sleep(0.1)
    except OSError, err:
        err = str(err)
        if err.find("No such process") > 0:
            if os.path.exists(pidfile):
                os.remove(pidfile)
        else:
            print str(err)
            sys.exit(1)


def serve(name, main):
    """Controller for server"""
    argv = FLAGS(sys.argv)

    if not FLAGS.pidfile:
        FLAGS.pidfile = '%s.pid' % name

    logging.debug("Full set of FLAGS: \n\n\n")
    for flag in FLAGS:
        logging.debug("%s : %s", flag, FLAGS.get(flag, None))

    action = 'start'
    if len(argv) > 1:
        action = argv.pop()

    if action == 'stop':
        stop(FLAGS.pidfile)
        sys.exit()
    elif action == 'restart':
        stop(FLAGS.pidfile)
    elif action == 'start':
        pass
    else:
        print 'usage: %s [options] [start|stop|restart]' % argv[0]
        sys.exit(1)
    daemonize(argv, name, main)


def daemonize(args, name, main):
    """Does the work of daemonizing the process"""
    logging.getLogger('amqplib').setLevel(logging.WARN)
    files_to_keep = []
    if FLAGS.daemonize:
        logger = logging.getLogger()
        formatter = logging.Formatter(
            name + '(%(name)s): %(levelname)s %(message)s')
        if FLAGS.use_syslog and not FLAGS.logfile:
            syslog = logging.handlers.SysLogHandler(address='/dev/log')
            syslog.setFormatter(formatter)
            logger.addHandler(syslog)
            files_to_keep.append(syslog.socket)
        else:
            if not FLAGS.logfile:
                FLAGS.logfile = '%s.log' % name
            if FLAGS.logdir:
                FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
            logfile = logging.FileHandler(FLAGS.logfile)
            logfile.setFormatter(formatter)
            logger.addHandler(logfile)
            files_to_keep.append(logfile.stream)
        stdin, stdout, stderr = None, None, None
    else:
        stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr

    if FLAGS.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    with daemon.DaemonContext(
            detach_process=FLAGS.daemonize,
            working_directory=FLAGS.working_directory,
            pidfile=pidlockfile.TimeoutPIDLockFile(FLAGS.pidfile,
                                                   acquire_timeout=1,
                                                   threaded=False),
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            uid=FLAGS.uid,
            gid=FLAGS.gid,
            files_preserve=files_to_keep):
        main(args)
107 nova/service.py
@@ -17,21 +17,17 @@
# under the License.

"""
A service is a very thin wrapper around a Manager object. It exposes the
manager's public methods to other components of the system via rpc. It will
report state periodically to the database and is responsible for initiating
any periodic tasks that need to be executed on a given host.

This module contains Service, a generic baseclass for all workers.
Generic Node baseclass for all workers that run on hosts
"""

import inspect
import logging
import os
import sys

from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
from eventlet import event
from eventlet import greenthread
from eventlet import greenpool

from nova import context
from nova import db
@@ -50,8 +46,16 @@ flags.DEFINE_integer('periodic_interval', 60,
                     'seconds between running periodic tasks',
                     lower_bound=1)

flags.DEFINE_string('pidfile', None,
                    'pidfile to use for this service')

class Service(object, service.Service):

flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())


class Service(object):
    """Base class for workers that run on hosts."""

    def __init__(self, host, binary, topic, manager, report_interval=None,
@@ -64,8 +68,9 @@ class Service(object, service.Service):
        self.periodic_interval = periodic_interval
        super(Service, self).__init__(*args, **kwargs)
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

    def startService(self):  # pylint: disable-msg=C0103
    def start(self):
        manager_class = utils.import_class(self.manager_class_name)
        self.manager = manager_class(host=self.host, *self.saved_args,
                                     **self.saved_kwargs)
@@ -80,26 +85,29 @@ class Service(object, service.Service):
        except exception.NotFound:
            self._create_service_ref(ctxt)

        conn = rpc.Connection.instance()
        conn1 = rpc.Connection.instance(new=True)
        conn2 = rpc.Connection.instance(new=True)
        if self.report_interval:
            consumer_all = rpc.AdapterConsumer(
                    connection=conn,
                    connection=conn1,
                    topic=self.topic,
                    proxy=self)
            consumer_node = rpc.AdapterConsumer(
                    connection=conn,
                    connection=conn2,
                    topic='%s.%s' % (self.topic, self.host),
                    proxy=self)

            consumer_all.attach_to_twisted()
            consumer_node.attach_to_twisted()
            self.timers.append(consumer_all.attach_to_eventlet())
            self.timers.append(consumer_node.attach_to_eventlet())

            pulse = task.LoopingCall(self.report_state)
            pulse = utils.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval, now=False)
            self.timers.append(pulse)

        if self.periodic_interval:
            pulse = task.LoopingCall(self.periodic_tasks)
            pulse.start(interval=self.periodic_interval, now=False)
            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval, now=False)
            self.timers.append(periodic)

    def _create_service_ref(self, context):
        service_ref = db.service_create(context,
@@ -143,29 +151,32 @@ class Service(object, service.Service):
            report_interval = FLAGS.report_interval
        if not periodic_interval:
            periodic_interval = FLAGS.periodic_interval
        logging.warn("Starting %s node", topic)
        logging.warn(_("Starting %s node"), topic)
        service_obj = cls(host, binary, topic, manager,
                          report_interval, periodic_interval)

        # This is the parent service that twistd will be looking for when it
        # parses this file, return it so that we can get it into globals.
        application = service.Application(binary)
        service_obj.setServiceParent(application)
        return application
        return service_obj

    def kill(self):
        """Destroy the service object in the datastore"""
        self.stop()
        try:
            db.service_destroy(context.get_admin_context(), self.service_id)
        except exception.NotFound:
            logging.warn("Service killed that has no database entry")
            logging.warn(_("Service killed that has no database entry"))

    def stop(self):
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                pass
        self.timers = []

    @defer.inlineCallbacks
    def periodic_tasks(self):
        """Tasks to be run at a periodic interval"""
        yield self.manager.periodic_tasks(context.get_admin_context())
        self.manager.periodic_tasks(context.get_admin_context())

    @defer.inlineCallbacks
    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
@@ -173,8 +184,8 @@ class Service(object, service.Service):
        try:
            service_ref = db.service_get(ctxt, self.service_id)
        except exception.NotFound:
            logging.debug("The service database object disappeared, "
                          "Recreating it.")
            logging.debug(_("The service database object disappeared, "
                            "Recreating it."))
            self._create_service_ref(ctxt)
            service_ref = db.service_get(ctxt, self.service_id)

@@ -185,11 +196,39 @@ class Service(object, service.Service):
            # TODO(termie): make this pattern be more elegant.
            if getattr(self, "model_disconnected", False):
                self.model_disconnected = False
                logging.error("Recovered model server connection!")
                logging.error(_("Recovered model server connection!"))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable-msg=W0702
            if not getattr(self, "model_disconnected", False):
                self.model_disconnected = True
                logging.exception("model server went away")
        yield
                logging.exception(_("model server went away"))


def serve(*services):
    argv = FLAGS(sys.argv)

    if not services:
        services = [Service.create()]

    name = '_'.join(x.binary for x in services)
    logging.debug("Serving %s" % name)

    logging.getLogger('amqplib').setLevel(logging.WARN)

    if FLAGS.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    logging.debug(_("Full set of FLAGS:"))
    for flag in FLAGS:
        logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))

    for x in services:
        x.start()


def wait():
    while True:
        greenthread.sleep(5)
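A binary now builds one or more Service objects, starts them with serve(), and parks the main green thread in wait(). The calling convention as a runnable toy (requires eventlet; ToyService is illustrative, a real binary would use Service.create()):

from eventlet import greenthread

class ToyService(object):
    def __init__(self, binary):
        self.binary = binary

    def start(self):
        # the real start() wires up AMQP consumers and LoopingCall timers
        print('starting %s' % self.binary)

def serve(*services):
    for svc in services:
        svc.start()

def wait():
    while True:
        greenthread.sleep(5)

serve(ToyService('nova-compute'), ToyService('nova-network'))
# wait()  # would block forever, exactly as nova's wait() does
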
107 nova/test.py
@@ -25,11 +25,12 @@ and some black magic for inline callbacks.
import datetime
import sys
import time
import unittest

import mox
import stubout
from twisted.internet import defer
from twisted.trial import unittest
from twisted.trial import unittest as trial_unittest

from nova import context
from nova import db
@@ -37,9 +38,12 @@ from nova import fakerabbit
from nova import flags
from nova import rpc
from nova.network import manager as network_manager
from nova.tests import fake_flags


FLAGS = flags.FLAGS
flags.DEFINE_bool('flush_db', True,
                  'Flush the database before running fake tests')
flags.DEFINE_bool('fake_tests', True,
                  'should we use everything for testing')

@@ -55,11 +59,11 @@ def skip_if_fake(func):
    return _skipper


class TrialTestCase(unittest.TestCase):
class TestCase(unittest.TestCase):
    """Test case base class for all unit tests"""
    def setUp(self):
        """Run before each test method to initialize test environment"""
        super(TrialTestCase, self).setUp()
        super(TestCase, self).setUp()
        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
@@ -70,8 +74,7 @@ class TrialTestCase(unittest.TestCase):
                                                          FLAGS.fixed_range,
                                                          5, 16,
                                                          FLAGS.vlan_start,
                                                          FLAGS.vpn_start,
                                                          FLAGS.fixed_range_v6)
                                                          FLAGS.vpn_start)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
@@ -95,7 +98,88 @@ class TrialTestCase(unittest.TestCase):
            db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host,
                                                    self.start)
            db.network_disassociate_all(ctxt)
            rpc.Consumer.attach_to_twisted = self.originalAttach
            rpc.Consumer.attach_to_eventlet = self.originalAttach
            for x in self.injected:
                try:
                    x.stop()
                except AssertionError:
                    pass

            if FLAGS.fake_rabbit:
                fakerabbit.reset_all()

            db.security_group_destroy_all(ctxt)
            super(TestCase, self).tearDown()
        finally:
            self.reset_flags()

    def flags(self, **kw):
        """Override flag variables for a test"""
        for k, v in kw.iteritems():
            if k in self.flag_overrides:
                self.reset_flags()
                raise Exception(
                    'trying to override already overridden flag: %s' % k)
            self.flag_overrides[k] = getattr(FLAGS, k)
            setattr(FLAGS, k, v)

    def reset_flags(self):
        """Resets all flag variables for the test. Runs after each test"""
        FLAGS.Reset()
        for k, v in self._original_flags.iteritems():
            setattr(FLAGS, k, v)

    def _monkey_patch_attach(self):
        self.originalAttach = rpc.Consumer.attach_to_eventlet

        def _wrapped(innerSelf):
            rv = self.originalAttach(innerSelf)
            self.injected.append(rv)
            return rv

        _wrapped.func_name = self.originalAttach.func_name
        rpc.Consumer.attach_to_eventlet = _wrapped


class TrialTestCase(trial_unittest.TestCase):
    """Test case base class for all unit tests"""
    def setUp(self):
        """Run before each test method to initialize test environment"""
        super(TrialTestCase, self).setUp()
        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = datetime.datetime.utcnow()
        ctxt = context.get_admin_context()
        if db.network_count(ctxt) != 5:
            network_manager.VlanManager().create_networks(ctxt,
                                                          FLAGS.fixed_range,
                                                          5, 16,
                                                          FLAGS.vlan_start,
                                                          FLAGS.vpn_start,
                                                          FLAGS.fixed_range_v6)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.flag_overrides = {}
        self.injected = []
        self._original_flags = FLAGS.FlagValuesDict()

    def tearDown(self):
        """Runs after each test method to finalize/tear down test
        environment."""
        try:
            self.mox.UnsetStubs()
            self.stubs.UnsetAll()
            self.stubs.SmartUnsetAll()
            self.mox.VerifyAll()
            # NOTE(vish): Clean up any ips associated during the test.
            ctxt = context.get_admin_context()
            db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host,
                                                    self.start)
            db.network_disassociate_all(ctxt)
            for x in self.injected:
                try:
                    x.stop()
@@ -148,14 +232,3 @@ class TrialTestCase(unittest.TestCase):
            return d
        _wrapped.func_name = func.func_name
        return _wrapped

    def _monkey_patch_attach(self):
        self.originalAttach = rpc.Consumer.attach_to_twisted

        def _wrapped(innerSelf):
            rv = self.originalAttach(innerSelf)
            self.injected.append(rv)
            return rv

        _wrapped.func_name = self.originalAttach.func_name
        rpc.Consumer.attach_to_twisted = _wrapped
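The flags()/reset_flags() pair gives each test scoped flag overrides with restore-on-teardown. The pattern in isolation (a runnable sketch with a fake FLAGS object standing in for nova's gflags wrapper):

class FakeFlags(object):
    verbose = False

FLAGS = FakeFlags()

class FlagOverrides(object):
    def __init__(self):
        self.flag_overrides = {}

    def flags(self, **kw):
        for k, v in kw.items():
            if k in self.flag_overrides:
                raise Exception(
                    'trying to override already overridden flag: %s' % k)
            self.flag_overrides[k] = getattr(FLAGS, k)
            setattr(FLAGS, k, v)

    def reset_flags(self):
        for k, v in self.flag_overrides.items():
            setattr(FLAGS, k, v)
        self.flag_overrides = {}

t = FlagOverrides()
t.flags(verbose=True)
print(FLAGS.verbose)   # True
t.reset_flags()
print(FLAGS.verbose)   # False
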
@@ -1,81 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Test for the root WSGI middleware for all API controllers.
"""

import unittest

import stubout
import webob
import webob.dec

import nova.exception
from nova import api
from nova.tests.api.fakes import APIStub


class Test(unittest.TestCase):

    def setUp(self):
        self.stubs = stubout.StubOutForTesting()

    def tearDown(self):
        self.stubs.UnsetAll()

    def _request(self, url, subdomain, **kwargs):
        environ_keys = {'HTTP_HOST': '%s.example.com' % subdomain}
        environ_keys.update(kwargs)
        req = webob.Request.blank(url, environ_keys)
        return req.get_response(api.API('ec2'))

    def test_openstack(self):
        self.stubs.Set(api.openstack, 'API', APIStub)
        result = self._request('/v1.0/cloud', 'api')
        self.assertEqual(result.body, "/cloud")

    def test_ec2(self):
        self.stubs.Set(api.ec2, 'API', APIStub)
        result = self._request('/services/cloud', 'ec2')
        self.assertEqual(result.body, "/cloud")

    def test_not_found(self):
        self.stubs.Set(api.ec2, 'API', APIStub)
        self.stubs.Set(api.openstack, 'API', APIStub)
        result = self._request('/test/cloud', 'ec2')
        self.assertNotEqual(result.body, "/cloud")

    def test_query_api_versions(self):
        result = self._request('/', 'api')
        self.assertTrue('CURRENT' in result.body)

    def test_metadata(self):
        def go(url):
            result = self._request(url, 'ec2', REMOTE_ADDR='128.192.151.2')
        # Each should get to the ORM layer and fail to find the IP
        self.assertRaises(nova.exception.NotFound, go, '/latest/')
        self.assertRaises(nova.exception.NotFound, go, '/2009-04-04/')
        self.assertRaises(nova.exception.NotFound, go, '/1.0/')

    def test_ec2_root(self):
        result = self._request('/', 'ec2')
        self.assertTrue('2007-12-15\n' in result.body)


if __name__ == '__main__':
    unittest.main()
@@ -17,11 +17,16 @@

import unittest

from nova.api.openstack import limited
from nova.api.openstack import RateLimitingMiddleware
from nova import context
from nova import flags
from nova.api.openstack.ratelimiting import RateLimitingMiddleware
from nova.api.openstack.common import limited
from nova.tests.api.fakes import APIStub
from nova import utils
from webob import Request

FLAGS = flags.FLAGS


class RateLimitingMiddlewareTest(unittest.TestCase):

@@ -46,6 +51,8 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
    def exhaust(self, middleware, method, url, username, times):
        req = Request.blank(url, dict(REQUEST_METHOD=method),
                            headers={'X-Auth-User': username})
        req.environ['nova.context'] = context.RequestContext(username,
                                                             username)
        for i in range(times):
            resp = req.get_response(middleware)
            self.assertEqual(resp.status_int, 200)
@@ -62,7 +69,7 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
        middleware = RateLimitingMiddleware(APIStub())
        self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 10)
        self.exhaust(middleware, 'POST', '/images/4', 'usr2', 10)
        self.assertTrue(set(middleware.limiter._levels) ==
        self.assertTrue(set(middleware.limiter._levels) == \
                        set(['usr1:POST', 'usr1:POST servers', 'usr2:POST']))

    def test_POST_servers_action_correctly_ratelimited(self):
@@ -29,8 +29,11 @@ from nova import exception as exc
from nova import flags
from nova import utils
import nova.api.openstack.auth
from nova.image import service
from nova.api.openstack import auth
from nova.api.openstack import ratelimiting
from nova.image import glance
from nova.image import local
from nova.image import service
from nova.tests import fake_flags
from nova.wsgi import Router

@@ -51,10 +54,11 @@ class FakeRouter(Router):
        return res


def fake_auth_init(self):
def fake_auth_init(self, application):
    self.db = FakeAuthDatabase()
    self.context = Context()
    self.auth = FakeAuthManager()
    self.application = application


@webob.dec.wsgify
@@ -75,28 +79,28 @@ def stub_out_image_service(stubs):
    def fake_image_show(meh, context, id):
        return dict(kernelId=1, ramdiskId=1)

    stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show)
    stubs.Set(local.LocalImageService, 'show', fake_image_show)


def stub_out_auth(stubs):
    def fake_auth_init(self, app):
        self.application = app

    stubs.Set(nova.api.openstack.AuthMiddleware,
    stubs.Set(nova.api.openstack.auth.AuthMiddleware,
              '__init__', fake_auth_init)
    stubs.Set(nova.api.openstack.AuthMiddleware,
    stubs.Set(nova.api.openstack.auth.AuthMiddleware,
              '__call__', fake_wsgi)


def stub_out_rate_limiting(stubs):
    def fake_rate_init(self, app):
        super(nova.api.openstack.RateLimitingMiddleware, self).__init__(app)
        super(ratelimiting.RateLimitingMiddleware, self).__init__(app)
        self.application = app

    stubs.Set(nova.api.openstack.RateLimitingMiddleware,
    stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware,
              '__init__', fake_rate_init)

    stubs.Set(nova.api.openstack.RateLimitingMiddleware,
    stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware,
              '__call__', fake_wsgi)


@@ -106,6 +110,12 @@ def stub_out_networking(stubs):
    stubs.Set(nova.utils, 'get_my_ip', get_my_ip)


def stub_out_compute_api_snapshot(stubs):
    def snapshot(self, context, instance_id, name):
        return 123
    stubs.Set(nova.compute.api.ComputeAPI, 'snapshot', snapshot)


def stub_out_glance(stubs, initial_fixtures=[]):

    class FakeParallaxClient:
@@ -173,7 +183,7 @@ class FakeToken(object):


class FakeRequestContext(object):
    def __init__(self, user, project):
    def __init__(self, user, project, *args, **kwargs):
        self.user_id = 1
        self.project_id = 1

@@ -209,6 +219,9 @@ class FakeAuthManager(object):
                return v
        return None

    def get_project(self, pid):
        return None

    def get_user_from_access_key(self, key):
        return FakeAuthManager.auth_data.get(key, None)
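All of these helpers lean on stubout's StubOutForTesting: Set() swaps an attribute for a fake and records the original, UnsetAll() restores everything. A minimal runnable example of the same mechanism (requires the stubout package; the ImageService class here is illustrative):

import stubout

class ImageService(object):
    def show(self, context, id):
        raise RuntimeError('would hit a real backend')

stubs = stubout.StubOutForTesting()
stubs.Set(ImageService, 'show', lambda self, context, id: {'kernelId': 1})
print(ImageService().show(None, 1))  # {'kernelId': 1}
stubs.UnsetAll()  # puts the real method back, as tearDown() does
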
@@ -34,7 +34,7 @@ class Test(unittest.TestCase):

    def setUp(self):
        self.stubs = stubout.StubOutForTesting()
        self.stubs.Set(nova.api.openstack.auth.BasicApiAuthManager,
        self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
                       '__init__', fakes.fake_auth_init)
        self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
        fakes.FakeAuthManager.auth_data = {}
@@ -131,7 +131,7 @@ class Test(unittest.TestCase):
class TestLimiter(unittest.TestCase):
    def setUp(self):
        self.stubs = stubout.StubOutForTesting()
        self.stubs.Set(nova.api.openstack.auth.BasicApiAuthManager,
        self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
                       '__init__', fakes.fake_auth_init)
        self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
        fakes.FakeAuthManager.auth_data = {}
@@ -50,7 +50,7 @@ class BaseImageServiceTests(object):
                   'updated': None,
                   'created': None,
                   'status': None,
                   'serverId': None,
                   'instance_id': None,
                   'progress': None}

        num_images = len(self.service.index(self.context))
@@ -67,7 +67,7 @@ class BaseImageServiceTests(object):
                   'updated': None,
                   'created': None,
                   'status': None,
                   'serverId': None,
                   'instance_id': None,
                   'progress': None}

        num_images = len(self.service.index(self.context))
@@ -87,7 +87,7 @@ class BaseImageServiceTests(object):
                   'updated': None,
                   'created': None,
                   'status': None,
                   'serverId': None,
                   'instance_id': None,
                   'progress': None}

        id = self.service.create(self.context, fixture)
@@ -105,13 +105,13 @@ class BaseImageServiceTests(object):
                     'updated': None,
                     'created': None,
                     'status': None,
                     'serverId': None,
                     'instance_id': None,
                     'progress': None},
                    {'name': 'test image 2',
                     'updated': None,
                     'created': None,
                     'status': None,
                     'serverId': None,
                     'instance_id': None,
                     'progress': None}]

        num_images = len(self.service.index(self.context))
@@ -155,6 +155,7 @@ class GlanceImageServiceTest(unittest.TestCase,
    def setUp(self):
        self.stubs = stubout.StubOutForTesting()
        fakes.stub_out_glance(self.stubs)
        fakes.stub_out_compute_api_snapshot(self.stubs)
        service_class = 'nova.image.glance.GlanceImageService'
        self.service = utils.import_object(service_class)
        self.context = context.RequestContext(None, None)
@@ -223,6 +224,20 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
        res = req.get_response(nova.api.API('os'))
        res_dict = json.loads(res.body)

        def _is_equivalent_subset(x, y):
            if set(x) <= set(y):
                for k, v in x.iteritems():
                    if x[k] != y[k]:
                        if x[k] == 'active' and y[k] == 'available':
                            continue
                        return False
                return True
            return False

        for image in res_dict['images']:
            self.assertEquals(1, self.IMAGE_FIXTURES.count(image),
                              "image %s not in fixtures!" % str(image))
            for image_fixture in self.IMAGE_FIXTURES:
                if _is_equivalent_subset(image, image_fixture):
                    break
            else:
                self.assertEquals(1, 2, "image %s not in fixtures!" %
                                  str(image))
@@ -56,11 +56,16 @@ def instance_address(context, instance_id):


def stub_instance(id, user_id=1):
    return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id,
    return Instance(id=int(id) + 123456, state=0, image_id=10, user_id=user_id,
                    display_name='server%s' % id, internal_id=id)


def fake_compute_api(cls, req, id):
    return True


class ServersTest(unittest.TestCase):

    def setUp(self):
        self.stubs = stubout.StubOutForTesting()
        fakes.FakeAuthManager.auth_data = {}
@@ -82,9 +87,23 @@ class ServersTest(unittest.TestCase):
                       instance_address)
        self.stubs.Set(nova.db.api, 'instance_get_floating_address',
                       instance_address)
        self.stubs.Set(nova.compute.api.ComputeAPI, 'pause',
                       fake_compute_api)
        self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause',
                       fake_compute_api)
        self.stubs.Set(nova.compute.api.ComputeAPI, 'suspend',
                       fake_compute_api)
        self.stubs.Set(nova.compute.api.ComputeAPI, 'resume',
                       fake_compute_api)
        self.stubs.Set(nova.compute.api.ComputeAPI, "get_diagnostics",
                       fake_compute_api)
        self.stubs.Set(nova.compute.api.ComputeAPI, "get_actions",
                       fake_compute_api)
        self.allow_admin = FLAGS.allow_admin_api

    def tearDown(self):
        self.stubs.UnsetAll()
        FLAGS.allow_admin_api = self.allow_admin

    def test_get_server_by_id(self):
        req = webob.Request.blank('/v1.0/servers/1')
@@ -211,6 +230,66 @@ class ServersTest(unittest.TestCase):
            self.assertEqual(s['imageId'], 10)
            i += 1

    def test_server_pause(self):
        FLAGS.allow_admin_api = True
        body = dict(server=dict(
            name='server_test', imageId=2, flavorId=2, metadata={},
            personality={}))
        req = webob.Request.blank('/v1.0/servers/1/pause')
        req.method = 'POST'
        req.content_type = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(nova.api.API('os'))
        self.assertEqual(res.status_int, 202)

    def test_server_unpause(self):
        FLAGS.allow_admin_api = True
        body = dict(server=dict(
            name='server_test', imageId=2, flavorId=2, metadata={},
            personality={}))
        req = webob.Request.blank('/v1.0/servers/1/unpause')
        req.method = 'POST'
        req.content_type = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(nova.api.API('os'))
        self.assertEqual(res.status_int, 202)

    def test_server_suspend(self):
        FLAGS.allow_admin_api = True
        body = dict(server=dict(
            name='server_test', imageId=2, flavorId=2, metadata={},
            personality={}))
        req = webob.Request.blank('/v1.0/servers/1/suspend')
        req.method = 'POST'
        req.content_type = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(nova.api.API('os'))
        self.assertEqual(res.status_int, 202)

    def test_server_resume(self):
        FLAGS.allow_admin_api = True
        body = dict(server=dict(
            name='server_test', imageId=2, flavorId=2, metadata={},
            personality={}))
        req = webob.Request.blank('/v1.0/servers/1/resume')
        req.method = 'POST'
        req.content_type = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(nova.api.API('os'))
        self.assertEqual(res.status_int, 202)

    def test_server_diagnostics(self):
        req = webob.Request.blank("/v1.0/servers/1/diagnostics")
        req.method = "GET"
        res = req.get_response(nova.api.API("os"))
        self.assertEqual(res.status_int, 404)

    def test_server_actions(self):
        req = webob.Request.blank("/v1.0/servers/1/actions")
        req.method = "GET"
        res = req.get_response(nova.api.API("os"))
        self.assertEqual(res.status_int, 404)

    def test_server_reboot(self):
        body = dict(server=dict(
            name='server_test', imageId=2, flavorId=2, metadata={},
81 nova/tests/api/test.py (new file)
@@ -0,0 +1,81 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Test for the root WSGI middleware for all API controllers.
"""

import unittest

import stubout
import webob
import webob.dec

import nova.exception
from nova import api
from nova.tests.api.fakes import APIStub


class Test(unittest.TestCase):

    def setUp(self):
        self.stubs = stubout.StubOutForTesting()

    def tearDown(self):
        self.stubs.UnsetAll()

    def _request(self, url, subdomain, **kwargs):
        environ_keys = {'HTTP_HOST': '%s.example.com' % subdomain}
        environ_keys.update(kwargs)
        req = webob.Request.blank(url, environ_keys)
        return req.get_response(api.API('ec2'))

    def test_openstack(self):
        self.stubs.Set(api.openstack, 'API', APIStub)
        result = self._request('/v1.0/cloud', 'api')
        self.assertEqual(result.body, "/cloud")

    def test_ec2(self):
        self.stubs.Set(api.ec2, 'API', APIStub)
        result = self._request('/services/cloud', 'ec2')
        self.assertEqual(result.body, "/cloud")

    def test_not_found(self):
        self.stubs.Set(api.ec2, 'API', APIStub)
        self.stubs.Set(api.openstack, 'API', APIStub)
        result = self._request('/test/cloud', 'ec2')
        self.assertNotEqual(result.body, "/cloud")

    def test_query_api_versions(self):
        result = self._request('/', 'api')
        self.assertTrue('CURRENT' in result.body)

    def test_metadata(self):
        def go(url):
            result = self._request(url, 'ec2', REMOTE_ADDR='128.192.151.2')
        # Each should get to the ORM layer and fail to find the IP
        self.assertRaises(nova.exception.NotFound, go, '/latest/')
        self.assertRaises(nova.exception.NotFound, go, '/2009-04-04/')
        self.assertRaises(nova.exception.NotFound, go, '/1.0/')

    def test_ec2_root(self):
        result = self._request('/', 'ec2')
        self.assertTrue('2007-12-15\n' in result.body)


if __name__ == '__main__':
    unittest.main()
@@ -1,54 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import boto
from boto.ec2.regioninfo import RegionInfo
import unittest


ACCESS_KEY = 'fake'
SECRET_KEY = 'fake'
CLC_IP = '127.0.0.1'
CLC_PORT = 8773
REGION = 'test'


def get_connection():
    return boto.connect_ec2(
        aws_access_key_id=ACCESS_KEY,
        aws_secret_access_key=SECRET_KEY,
        is_secure=False,
        region=RegionInfo(None, REGION, CLC_IP),
        port=CLC_PORT,
        path='/services/Cloud',
        debug=99)


class APIIntegrationTests(unittest.TestCase):
    def test_001_get_all_images(self):
        conn = get_connection()
        res = conn.get_all_images()


if __name__ == '__main__':
    unittest.main()

#print conn.get_all_key_pairs()
#print conn.create_key_pair
#print conn.create_security_group('name', 'description')
20 nova/tests/db/__init__.py (new file)
@@ -0,0 +1,20 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
:mod:`db` -- Stubs for DB API
=============================
"""
75 nova/tests/db/fakes.py (new file)
@@ -0,0 +1,75 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Stubouts, mocks and fixtures for the test suite"""

import time

from nova import db
from nova import utils
from nova.compute import instance_types


def stub_out_db_instance_api(stubs):
    """ Stubs out the db API for creating Instances """

    class FakeModel(object):
        """ Stubs out for model """
        def __init__(self, values):
            self.values = values

        def __getattr__(self, name):
            return self.values[name]

        def __getitem__(self, key):
            if key in self.values:
                return self.values[key]
            else:
                raise NotImplementedError()

    def fake_instance_create(values):
        """ Stubs out the db.instance_create method """

        type_data = instance_types.INSTANCE_TYPES[values['instance_type']]

        base_options = {
            'name': values['name'],
            'id': values['id'],
            'reservation_id': utils.generate_uid('r'),
            'image_id': values['image_id'],
            'kernel_id': values['kernel_id'],
            'ramdisk_id': values['ramdisk_id'],
            'state_description': 'scheduling',
            'user_id': values['user_id'],
            'project_id': values['project_id'],
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type': values['instance_type'],
            'memory_mb': type_data['memory_mb'],
            'mac_address': values['mac_address'],
            'vcpus': type_data['vcpus'],
            'local_gb': type_data['local_gb'],
            }
        return FakeModel(base_options)

    def fake_network_get_by_instance(context, instance_id):
        fields = {
            'bridge': 'xenbr0',
        }
        return FakeModel(fields)

    stubs.Set(db, 'instance_create', fake_instance_create)
    stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
Some files were not shown because too many files have changed in this diff.