pulled changes from trunk

added console api to openstack api
Monsyne Dragon
2011-01-05 19:02:24 -06:00
65 changed files with 2093 additions and 227 deletions


@@ -6,6 +6,7 @@ keys
networks
nova.sqlite
CA/cacert.pem
CA/crl.pem
CA/index.txt*
CA/openssl.cnf
CA/serial*


@@ -27,3 +27,6 @@
<vishvananda@gmail.com> <root@ubuntu>
<sleepsonthefloor@gmail.com> <root@tonbuntu>
<rlane@wikimedia.org> <laner@controller>
<rconradharris@gmail.com> <rick.harris@rackspace.com>
<corywright@gmail.com> <cory.wright@rackspace.com>
<ant@openstack.org> <amesserl@rackspace.com>


@@ -1,9 +1,11 @@
Andy Smith <code@term.ie>
Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Chris Behrens <cbehrens@codestud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Cory Wright <corywright@gmail.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
@@ -26,6 +28,7 @@ Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>
Ryan Lane <rlane@wikimedia.org>
Ryan Lucio <rlucio@internap.com>
Salvatore Orlando <salvatore.orlando@eu.citrix.com>

bin/nova-api-paste (new executable file, 109 lines)

@@ -0,0 +1,109 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova API."""
import gettext
import logging
import os
import sys
from paste import deploy
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import flags
from nova import wsgi
LOG = logging.getLogger('nova.api')
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
FLAGS = flags.FLAGS
API_ENDPOINTS = ['ec2', 'openstack']
def load_configuration(paste_config):
"""Load the paste configuration from the config file and return it."""
config = None
# Try each known name to get the global DEFAULTS, which will give ports
for name in API_ENDPOINTS:
try:
config = deploy.appconfig("config:%s" % paste_config, name=name)
except LookupError:
pass
if config:
verbose = config.get('verbose', None)
if verbose:
FLAGS.verbose = int(verbose) == 1
if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
return config
LOG.debug(_("Paste config at %s has no secion for known apis"),
paste_config)
print _("Paste config at %s has no secion for any known apis") % \
paste_config
sys.exit(1)
def launch_api(paste_config_file, section, server, port, host):
"""Launch an api server from the specified port and IP."""
LOG.debug(_("Launching %s api on %s:%s"), section, host, port)
app = deploy.loadapp('config:%s' % paste_config_file, name=section)
server.start(app, int(port), host)
def run_app(paste_config_file):
LOG.debug(_("Using paste.deploy config at: %s"), configfile)
config = load_configuration(paste_config_file)
LOG.debug(_("Configuration: %r"), config)
server = wsgi.Server()
ip = config.get('host', '0.0.0.0')
for api in API_ENDPOINTS:
port = config.get("%s_port" % api, None)
if not port:
continue
host = config.get("%s_host" % api, ip)
launch_api(paste_config_file, api, server, port, host)
LOG.debug(_("All api servers launched, now waiting"))
server.wait()
if __name__ == '__main__':
FLAGS(sys.argv)
configfiles = ['/etc/nova/nova-api.conf']
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
configfiles.insert(0,
os.path.join(possible_topdir, 'etc', 'nova-api.conf'))
for configfile in configfiles:
if os.path.exists(configfile):
run_app(configfile)
break
else:
LOG.debug(_("Skipping missing configuration: %s"), configfile)


@@ -53,6 +53,7 @@
CLI interface for nova management.
"""
import datetime
import gettext
import logging
import os
@@ -452,6 +453,52 @@ class NetworkCommands(object):
int(network_size), int(vlan_start),
int(vpn_start))
class ServiceCommands(object):
"""Enable and disable running services"""
def list(self, host=None, service=None):
"""Show a list of all running services. Filter by host & service name.
args: [host] [service]"""
ctxt = context.get_admin_context()
now = datetime.datetime.utcnow()
services = db.service_get_all(ctxt)
if host:
services = [s for s in services if s['host'] == host]
if service:
services = [s for s in services if s['binary'] == service]
for svc in services:
delta = now - (svc['updated_at'] or svc['created_at'])
alive = (delta.seconds <= 15)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if svc['disabled']:
active = 'disabled'
print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
active, art,
svc['updated_at'])
def enable(self, host, service):
"""Enable scheduling for a service
args: host service"""
ctxt = context.get_admin_context()
svc = db.service_get_by_args(ctxt, host, service)
if not svc:
print "Unable to find service"
return
db.service_update(ctxt, svc['id'], {'disabled': False})
def disable(self, host, service):
"""Disable scheduling for a service
args: host service"""
ctxt = context.get_admin_context()
svc = db.service_get_by_args(ctxt, host, service)
if not svc:
print "Unable to find service"
return
db.service_update(ctxt, svc['id'], {'disabled': True})
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -459,7 +506,8 @@ CATEGORIES = [
('shell', ShellCommands),
('vpn', VpnCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands)]
('network', NetworkCommands),
('service', ServiceCommands)]
def lazy_match(name, key_value_tuples):

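A quick usage sketch for the new `service` category registered in nova-manage above (host and binary names are placeholders)::

    nova-manage service list                         # every service, with state and heartbeat
    nova-manage service list node1 nova-compute      # filter by host and binary
    nova-manage service disable node1 nova-compute   # take the host out of scheduling
    nova-manage service enable node1 nova-compute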

@@ -16,13 +16,13 @@ Here's a script you can use to install (and then run) Nova on Ubuntu or Debian (
Step 2: Install dependencies
----------------------------
Nova requires rabbitmq for messaging and optionally you can use redis for storing state, so install these first.
Nova requires rabbitmq for messaging, so install that first.
*Note:* You must have sudo installed to run these commands as shown here.
::
sudo apt-get install rabbitmq-server redis-server
sudo apt-get install rabbitmq-server
You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
@@ -31,11 +31,10 @@ If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gfl
::
sudo apt-get install python-twisted
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 95C71FE2
sudo sh -c 'echo "deb http://ppa.launchpad.net/openstack/openstack-ppa/ubuntu lucid main" > /etc/apt/sources.list.d/openstackppa.list'
sudo apt-get update && sudo apt-get install python-gflags
sudo apt-get install python-software-properties
sudo add-apt-repository ppa:nova-core/trunk
sudo apt-get update
sudo apt-get install python-twisted python-gflags
Once you've done this, continue at Step 3 here: :doc:`../single.node.install`


@@ -76,11 +76,11 @@ External unix tools that are required:
* aoetools and vblade-persist (if you use aoe-volumes)
Nova uses cutting-edge versions of many packages. There are ubuntu packages in
the nova-core ppa. You can add this ppa to your sources list on an ubuntu
machine with the following commands::
the nova-core trunk ppa. You can add this ppa to your sources list on an
ubuntu machine with the following commands::
sudo apt-get install -y python-software-properties
sudo add-apt-repository ppa:nova-core/ppa
sudo add-apt-repository ppa:nova-core/trunk
Recommended
-----------


@@ -46,12 +46,12 @@ Assumptions
Step 1 Use apt-get to get the latest code
-----------------------------------------
1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/ppa.
1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk.
::
sudo apt-get install python-software-properties
sudo add-apt-repository ppa:nova-core/ppa
sudo add-apt-repository ppa:nova-core/trunk
2. Run update.
@@ -77,21 +77,20 @@ Nova development has consolidated all .conf files to nova.conf as of November 20
#. These need to be defined in the nova.conf configuration file::
--sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db
--s3_host=$CC_ADDR # This is where nova is hosting the objectstore service, which
# will contain the VM images and buckets
--rabbit_host=$CC_ADDR # This is where the rabbit AMQP messaging service is hosted
--cc_host=$CC_ADDR # This is where the nova-api service lives
--verbose # Optional but very helpful during initial setup
--ec2_url=http://$CC_ADDR:8773/services/Cloud
--network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type
--fixed_range=<network/prefix> # ip network to use for VM guests, ex 192.168.2.64/26
--network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64
--sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db
--s3_host=$CC_ADDR # This is where Nova is hosting the objectstore service, which
# will contain the VM images and buckets
--rabbit_host=$CC_ADDR # This is where the rabbit AMQP messaging service is hosted
--cc_host=$CC_ADDR # This is where the nova-api service lives
--verbose # Optional but very helpful during initial setup
--ec2_url=http://$CC_ADDR:8773/services/Cloud
--network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type
--fixed_range=<network/prefix> # ip network to use for VM guests, ex 192.168.2.64/26
--network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64
#. Create a nova group::
sudo addgroup nova
sudo addgroup nova
The Nova config file should have its owner set to root:nova, and mode set to 0640, since it contains your MySQL server's root password.


@@ -24,7 +24,7 @@ Routing
To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information.
URLs are mapped to "action" methods on "controller" classes in nova/api/openstack/__init__/ApiRouter.__init__ .
URLs are mapped to "action" methods on "controller" classes in `nova/api/openstack/__init__/ApiRouter.__init__` .
See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two:
- mapper.connect() lets you map a single URL to a single action on a controller.
@@ -33,9 +33,9 @@ See http://routes.groovie.org/manual.html for all syntax, but you'll probably ju
Controllers and actions
-----------------------
Controllers live in nova/api/openstack, and inherit from nova.wsgi.Controller.
Controllers live in `nova/api/openstack`, and inherit from nova.wsgi.Controller.
See nova/api/openstack/servers.py for an example.
See `nova/api/openstack/servers.py` for an example.
Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc.
@@ -46,7 +46,7 @@ Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML
If you define a new controller, you'll need to define a _serialization_metadata attribute on the class, to tell wsgi.Controller how to convert your dictionary to XML. It needs to know the singular form of any list tag (e.g. <servers> list contains <server> tags) and which dictionary keys are to be XML attributes as opposed to subtags (e.g. <server id="4"/> instead of <server><id>4</id></server>).
See nova/api/openstack/servers.py for an example.
See `nova/api/openstack/servers.py` for an example.
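A minimal sketch of such a controller, modeled on the consoles controller added elsewhere in this commit (the empty attribute list means no dictionary keys become XML attributes)::

    from nova import wsgi

    class Controller(wsgi.Controller):
        # Tell wsgi.Controller how to render returned dicts as XML: a
        # <consoles> list contains <console> elements, and no keys are
        # promoted to XML attributes.
        _serialization_metadata = {
            'application/xml': {
                'attributes': {
                    'console': []}}}

        def index(self, req, server_id):
            # Actions return a plain dict; wsgi.Controller serializes it
            # to JSON or XML depending on the request.
            return dict(consoles=[])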
Faults
------


@@ -71,8 +71,8 @@ RPC Casts
The diagram below shows the message flow during an rpc.cast operation:
1. a Topic Publisher is instantiated to send the message request to the queuing system.
2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task.
1. A Topic Publisher is instantiated to send the message request to the queuing system.
2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task.
.. image:: /images/rabbit/flow2.png
:width: 60%

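To make the message shape concrete: the caller hands rpc.cast a plain dict naming the remote method and its arguments, as the compute API changes in this commit do. A hedged sketch (the topic string and ids are placeholders)::

    from nova import context
    from nova import rpc

    ctxt = context.get_admin_context()
    # Fire-and-forget: a Topic Publisher pushes the message onto the
    # "compute.node1" topic, and the Topic Consumer on that worker
    # invokes snapshot_instance(instance_id=42, name='backup-1');
    # no reply is awaited.
    rpc.cast(ctxt,
             "compute.node1",
             {"method": "snapshot_instance",
              "args": {"instance_id": 42, "name": "backup-1"}})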

@@ -75,7 +75,7 @@ Nova is built on a shared-nothing, messaging-based architecture. All of the majo
To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
.. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>_`.
.. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>`_.
Concept: Storage
----------------
@@ -129,12 +129,12 @@ The simplest networking mode. Each instance receives a fixed ip from the pool.
Flat DHCP Mode
~~~~~~~~~~~~~~
This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.
This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode Nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.
VLAN DHCP Mode
~~~~~~~~~~~~~~
This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, Nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
The following diagram illustrates the communication that occurs between the vlan (the dashed box) and the public internet (represented by the two clouds):
@@ -154,16 +154,16 @@ Concept: nova-manage
--------------------
The nova-manage command is used to perform many essential functions for
administration and ongoing maintenance of nova, such as user creation,
administration and ongoing maintenance of Nova, such as user creation,
vpn management, and much more.
See doc:`nova.manage` in the Administration Guide for more details.
See :doc:`nova.manage` in the Administration Guide for more details.
Concept: Flags
--------------
Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
Concept: Plugins
@@ -181,7 +181,7 @@ Concept: Plugins
Concept: IPC/RPC
----------------
Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.
Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various Nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.
Concept: Fakes
--------------


@@ -59,38 +59,21 @@ different configurations (though for more complex setups you should see
* HOST_IP
* Default: address of first interface from the ifconfig command
* Values: 127.0.0.1, or any other valid address
TEST
~~~~
**Default**: 0
**Values**: 1, run tests after checkout and initial setup
USE_MYSQL
~~~~~~~~~
**Default**: 0, use sqlite3
**Values**: 1, use mysql instead of sqlite3
MYSQL_PASS
~~~~~~~~~~
Only useful if $USE_MYSQL=1.
**Default**: nova
**Values**: value of root password for mysql
USE_LDAP
~~~~~~~~
**Default**: 0, use :mod:`nova.auth.dbdriver`
**Values**: 1, use :mod:`nova.auth.ldapdriver`
LIBVIRT_TYPE
~~~~~~~~~~~~
**Default**: qemu
**Values**: uml, kvm
* TEST
* Default: 0
* Values: 1, run tests after checkout and initial setup
* USE_MYSQL
* Default: 0, use sqlite3
* Values: 1, use mysql instead of sqlite3
* MYSQL_PASS (Only useful if $USE_MYSQL=1)
* Default: nova
* Values: value of root password for mysql
* USE_LDAP
* Default: 0, use :mod:`nova.auth.dbdriver`
* Values: 1, use :mod:`nova.auth.ldapdriver`
* LIBVIRT_TYPE
* Default: qemu
* Values: uml, kvm
Usage
-----

etc/nova-api.conf (new file, 63 lines)

@@ -0,0 +1,63 @@
[DEFAULT]
verbose = 1
ec2_port = 8773
ec2_address = 0.0.0.0
openstack_port = 8774
openstack_address = 0.0.0.0
#######
# EC2 #
#######
[composite:ec2]
use = egg:Paste#urlmap
/: ec2versions
/services: ec2api
/latest: ec2metadata
/200: ec2metadata
/1.0: ec2metadata
[pipeline:ec2api]
pipeline = authenticate router authorizer ec2executor
[filter:authenticate]
paste.filter_factory = nova.api.ec2:authenticate_factory
[filter:router]
paste.filter_factory = nova.api.ec2:router_factory
[filter:authorizer]
paste.filter_factory = nova.api.ec2:authorizer_factory
[app:ec2executor]
paste.app_factory = nova.api.ec2:executor_factory
[app:ec2versions]
paste.app_factory = nova.api.ec2:versions_factory
[app:ec2metadata]
paste.app_factory = nova.api.ec2.metadatarequesthandler:metadata_factory
#############
# Openstack #
#############
[composite:openstack]
use = egg:Paste#urlmap
/: osversions
/v1.0: openstackapi
[pipeline:openstackapi]
pipeline = auth ratelimit osapi
[filter:auth]
paste.filter_factory = nova.api.openstack.auth:auth_factory
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.ratelimiting:ratelimit_factory
[app:osapi]
paste.app_factory = nova.api.openstack:router_factory
[app:osversions]
paste.app_factory = nova.api.openstack:versions_factory

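For reference, bin/nova-api-paste consumes this file through paste.deploy: it reads the [DEFAULT] globals to find the ports, then builds the WSGI app for each composite section. A minimal sketch of that loading (the path matches the default search location in the script)::

    from paste import deploy

    # Pull the [DEFAULT] globals (ports, verbose flag) for one endpoint.
    config = deploy.appconfig('config:/etc/nova/nova-api.conf', name='openstack')

    # Build the WSGI application defined by [composite:openstack].
    app = deploy.loadapp('config:/etc/nova/nova-api.conf', name='openstack')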

@@ -294,10 +294,9 @@ class Executor(wsgi.Application):
args = req.environ['ec2.action_args']
api_request = apirequest.APIRequest(controller, action)
result = None
try:
result = api_request.send(context, **args)
req.headers['Content-Type'] = 'text/xml'
return result
except exception.ApiError as ex:
if ex.code:
@@ -307,6 +306,12 @@ class Executor(wsgi.Application):
# TODO(vish): do something more useful with unknown exceptions
except Exception as ex:
return self._error(req, type(ex).__name__, str(ex))
else:
resp = webob.Response()
resp.status = 200
resp.headers['Content-Type'] = 'text/xml'
resp.body = str(result)
return resp
def _error(self, req, code, message):
logging.error("%s: %s", code, message)
@@ -318,3 +323,49 @@ class Executor(wsgi.Application):
'<Message>%s</Message></Error></Errors>'
'<RequestID>?</RequestID></Response>' % (code, message))
return resp
class Versions(wsgi.Application):
@webob.dec.wsgify
def __call__(self, req):
"""Respond to a request for all EC2 versions."""
# available api versions
versions = [
'1.0',
'2007-01-19',
'2007-03-01',
'2007-08-29',
'2007-10-10',
'2007-12-15',
'2008-02-01',
'2008-09-01',
'2009-04-04',
]
return ''.join('%s\n' % v for v in versions)
def authenticate_factory(global_args, **local_args):
def authenticator(app):
return Authenticate(app)
return authenticator
def router_factory(global_args, **local_args):
def router(app):
return Router(app)
return router
def authorizer_factory(global_args, **local_args):
def authorizer(app):
return Authorizer(app)
return authorizer
def executor_factory(global_args, **local_args):
return Executor()
def versions_factory(global_args, **local_args):
return Versions()


@@ -188,9 +188,46 @@ class CloudController(object):
return data
def describe_availability_zones(self, context, **kwargs):
if ('zone_name' in kwargs and
'verbose' in kwargs['zone_name'] and
context.is_admin):
return self._describe_availability_zones_verbose(context,
**kwargs)
else:
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
return {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
def _describe_availability_zones_verbose(self, context, **kwargs):
rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
services = db.service_get_all(context)
now = db.get_time()
hosts = []
for host in [service['host'] for service in services]:
if not host in hosts:
hosts.append(host)
for host in hosts:
rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host,
'zoneState': ''})
hsvcs = [service for service in services \
if service['host'] == host]
for svc in hsvcs:
delta = now - (svc['updated_at'] or svc['created_at'])
alive = (delta.seconds <= FLAGS.service_down_time)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if svc['disabled']:
active = 'disabled'
rv['availabilityZoneInfo'].append({
'zoneName': '| |- %s' % svc['binary'],
'zoneState': '%s %s %s' % (active, art,
svc['updated_at'])})
return rv
def describe_regions(self, context, region_name=None, **kwargs):
if FLAGS.region_list:
regions = []
@@ -765,6 +802,8 @@ class CloudController(object):
key_name=kwargs.get('key_name'),
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'AvailabilityZone'),
generate_hostname=internal_id_to_ec2_id)
return self._format_run_instances(context,
instances[0]['reservation_id'])

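For a deployment with a single compute host, the verbose availability-zone listing added to the cloud controller above returns a structure along these lines (host name and timestamp are illustrative)::

    {'availabilityZoneInfo': [
        {'zoneName': 'nova',              'zoneState': 'available'},
        {'zoneName': '|- node1',          'zoneState': ''},
        {'zoneName': '| |- nova-compute', 'zoneState': 'enabled :-) 2011-01-05 19:02:24'}]}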

@@ -79,3 +79,7 @@ class MetadataRequestHandler(object):
if data is None:
raise webob.exc.HTTPNotFound()
return self.print_data(data)
def metadata_factory(global_args, **local_args):
return MetadataRequestHandler()


@@ -20,7 +20,6 @@
WSGI middleware for OpenStack API controllers.
"""
import json
import time
import logging
@@ -36,12 +35,12 @@ from nova import utils
from nova import wsgi
from nova.api.openstack import faults
from nova.api.openstack import backup_schedules
from nova.api.openstack import consoles
from nova.api.openstack import flavors
from nova.api.openstack import images
from nova.api.openstack import ratelimiting
from nova.api.openstack import servers
from nova.api.openstack import sharedipgroups
from nova.auth import manager
FLAGS = flags.FLAGS
@@ -93,6 +92,8 @@ class APIRouter(wsgi.Router):
logging.debug("Including admin operations in API.")
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
server_members["diagnostics"] = "GET"
server_members["actions"] = "GET"
server_members['suspend'] = 'POST'
server_members['resume'] = 'POST'
@@ -100,11 +101,16 @@ class APIRouter(wsgi.Router):
collection={'detail': 'GET'},
member=server_members)
mapper.resource("backup_schedule", "backup_schedules",
mapper.resource("backup_schedule", "backup_schedule",
controller=backup_schedules.Controller(),
parent_resource=dict(member_name='server',
collection_name='servers'))
mapper.resource("console", "consoles",
controller=consoles.Controller(),
parent_resource=dict(member_name='server',
collection_name='servers'))
mapper.resource("image", "images", controller=images.Controller(),
collection={'detail': 'GET'})
mapper.resource("flavor", "flavors", controller=flavors.Controller(),
@@ -113,3 +119,24 @@ class APIRouter(wsgi.Router):
controller=sharedipgroups.Controller())
super(APIRouter, self).__init__(mapper)
class Versions(wsgi.Application):
@webob.dec.wsgify
def __call__(self, req):
"""Respond to a request for all OpenStack API versions."""
response = {
"versions": [
dict(status="CURRENT", id="v1.0")]}
metadata = {
"application/xml": {
"attributes": dict(version=["status", "id"])}}
return wsgi.Serializer(req.environ, metadata).to_content_type(response)
def router_factory(global_conf, **local_conf):
return APIRouter()
def versions_factory(global_conf, **local_conf):
return Versions()


@@ -55,7 +55,8 @@ class AuthMiddleware(wsgi.Middleware):
if not user:
return faults.Fault(webob.exc.HTTPUnauthorized())
req.environ['nova.context'] = context.RequestContext(user, user)
project = self.auth.get_project(FLAGS.default_project)
req.environ['nova.context'] = context.RequestContext(user, project)
return self.application
def has_authentication(self, req):
@@ -133,3 +134,9 @@ class AuthMiddleware(wsgi.Middleware):
token = self.db.auth_create_token(ctxt, token_dict)
return token, user
return None, None
def auth_factory(global_conf, **local_conf):
def auth(app):
return AuthMiddleware(app)
return auth


@@ -23,13 +23,25 @@ from nova.api.openstack import faults
import nova.image.service
def _translate_keys(inst):
""" Coerces the backup schedule into proper dictionary format """
return dict(backupSchedule=inst)
class Controller(wsgi.Controller):
""" The backup schedule API controller for the Openstack API """
_serialization_metadata = {
'application/xml': {
'attributes': {
'backupSchedule': []}}}
def __init__(self):
pass
def index(self, req, server_id):
return faults.Fault(exc.HTTPNotFound())
""" Returns the list of backup schedules for a given instance """
return _translate_keys({})
def create(self, req, server_id):
""" No actual update method required, since the existing API allows
@@ -37,4 +49,5 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
def delete(self, req, server_id, id):
""" Deletes an existing backup schedule """
return faults.Fault(exc.HTTPNotFound())


@@ -0,0 +1,92 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova import exception
from nova import wsgi
from nova.console import api as console_api
from nova.api.openstack import faults
def _translate_keys(inst):
"""Coerces a console instance into proper dictionary format """
return dict(console=inst)
def _translate_detail_keys(inst):
"""Coerces a console instance into proper dictionary format with
correctly mapped attributes """
return dict(console=inst)
class Controller(wsgi.Controller):
"""The Consoles Controller for the Openstack API"""
_serialization_metadata = {
'application/xml': {
'attributes': {
'console': []}}}
def __init__(self):
self.console_api = console_api.ConsoleAPI()
super(Controller, self).__init__()
def index(self, req, server_id):
"""Returns a list of consoles for this instance"""
consoles = self.console_api.get_consoles(
req.environ['nova.context'],
int(server_id))
return dict(consoles=[_translate_keys(console)
for console in consoles])
def create(self, req, server_id):
"""Creates a new console"""
#info = self._deserialize(req.body, req)
self.console_api.create_console(
req.environ['nova.context'],
int(server_id))
def show(self, req, server_id, id):
"""Shows in-depth information on a specific console"""
try:
console = self.console_api.get_console(
req.environ['nova.context'],
int(server_id),
int(id))
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return _translate_detail_keys(console)
def update(self, req, server_id, id):
"""You can't update a console"""
raise faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, server_id, id):
"""Deletes a console"""
try:
self.console_api.delete_console(req.environ['nova.context'],
int(server_id),
int(id))
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
# def detail(self, req, id):
# """ Returns a complete list of consoles for this instance"""
# return _translate_detail_keys({})

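The router change in nova/api/openstack/__init__.py nests this consoles controller under servers, so the new endpoints can be exercised the same way the existing API tests do. A hedged sketch (server id 1 is a placeholder; the stubs the real tests install are omitted)::

    import webob
    import nova.api

    # GET /v1.0/servers/1/consoles -> Controller.index(server_id=1)
    req = webob.Request.blank('/v1.0/servers/1/consoles')
    res = req.get_response(nova.api.API('os'))

    # POST to the same collection -> Controller.create(server_id=1)
    req = webob.Request.blank('/v1.0/servers/1/consoles')
    req.method = 'POST'
    res = req.get_response(nova.api.API('os'))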

@@ -25,7 +25,7 @@ import nova.image.service
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.compute import api as compute_api
FLAGS = flags.FLAGS
@@ -127,9 +127,11 @@ class Controller(wsgi.Controller):
raise faults.Fault(exc.HTTPNotFound())
def create(self, req):
# Only public images are supported for now, so a request to
# make a backup of a server cannot be supported.
raise faults.Fault(exc.HTTPNotFound())
context = req.environ['nova.context']
env = self._deserialize(req.body, req)
instance_id = env["image"]["serverId"]
name = env["image"]["name"]
return compute_api.ComputeAPI().snapshot(context, instance_id, name)
def update(self, req, id):
# Users may not modify public images, and that's all that


@@ -64,9 +64,9 @@ class RateLimitingMiddleware(wsgi.Middleware):
If the request should be rate limited, return a 413 status with a
Retry-After header giving the time when the request would succeed.
"""
return self.limited_request(req, self.application)
return self.rate_limited_request(req, self.application)
def limited_request(self, req, application):
def rate_limited_request(self, req, application):
"""Rate limit the request.
If the request should be rate limited, return a 413 status with a
@@ -219,3 +219,9 @@ class WSGIAppProxy(object):
# No delay
return None
return float(resp.getheader('X-Wait-Seconds'))
def ratelimit_factory(global_conf, **local_conf):
def rl(app):
return RateLimitingMiddleware(app)
return rl


@@ -35,14 +35,11 @@ LOG = logging.getLogger('server')
LOG.setLevel(logging.DEBUG)
def _entity_list(entities):
""" Coerces a list of servers into proper dictionary format """
return dict(servers=entities)
def _entity_detail(inst):
""" Maps everything to Rackspace-like attributes for return"""
def _translate_detail_keys(inst):
""" Coerces into dictionary format, mapping everything to Rackspace-like
attributes for return"""
power_mapping = {
None: 'build',
power_state.NOSTATE: 'build',
power_state.RUNNING: 'active',
power_state.BLOCKED: 'active',
@@ -67,8 +64,9 @@ def _entity_detail(inst):
return dict(server=inst_dict)
def _entity_inst(inst):
""" Filters all model attributes save for id and name """
def _translate_keys(inst):
""" Coerces into dictionary format, excluding all model attributes
save for id and name """
return dict(server=dict(id=inst['internal_id'], name=inst['display_name']))
@@ -87,29 +85,29 @@ class Controller(wsgi.Controller):
def index(self, req):
""" Returns a list of server names and ids for a given user """
return self._items(req, entity_maker=_entity_inst)
return self._items(req, entity_maker=_translate_keys)
def detail(self, req):
""" Returns a list of server details for a given user """
return self._items(req, entity_maker=_entity_detail)
return self._items(req, entity_maker=_translate_detail_keys)
def _items(self, req, entity_maker):
"""Returns a list of servers for a given user.
entity_maker - either _entity_detail or _entity_inst
entity_maker - either _translate_detail_keys or _translate_keys
"""
instance_list = self.compute_api.get_instances(
req.environ['nova.context'])
limited_list = common.limited(instance_list, req)
res = [entity_maker(inst)['server'] for inst in limited_list]
return _entity_list(res)
return dict(servers=res)
def show(self, req, id):
""" Returns server details by server id """
try:
instance = self.compute_api.get_instance(
req.environ['nova.context'], int(id))
return _entity_detail(instance)
return _translate_detail_keys(instance)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
@@ -138,7 +136,7 @@ class Controller(wsgi.Controller):
description=env['server']['name'],
key_name=key_pair['name'],
key_data=key_pair['public_key'])
return _entity_inst(instances[0])
return _translate_keys(instances[0])
def update(self, req, id):
""" Updates the server name or password """
@@ -153,8 +151,9 @@ class Controller(wsgi.Controller):
update_dict['display_name'] = inst_dict['server']['name']
try:
self.compute_api.update_instance(req.environ['nova.context'],
instance['id'],
ctxt = req.environ['nova.context']
self.compute_api.update_instance(ctxt,
id,
**update_dict)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
@@ -219,3 +218,13 @@ class Controller(wsgi.Controller):
logging.error(_("compute.api::resume %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
def diagnostics(self, req, id):
"""Permit Admins to retrieve server diagnostics."""
ctxt = req.environ["nova.context"]
return self.compute_api.get_diagnostics(ctxt, id)
def actions(self, req, id):
"""Permit Admins to retrieve server actions."""
ctxt = req.environ["nova.context"]
return self.compute_api.get_actions(ctxt, id)


@@ -15,26 +15,51 @@
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova import wsgi
from nova.api.openstack import faults
def _translate_keys(inst):
""" Coerces a shared IP group instance into proper dictionary format """
return dict(sharedIpGroup=inst)
def _translate_detail_keys(inst):
""" Coerces a shared IP group instance into proper dictionary format with
correctly mapped attributes """
return dict(sharedIpGroup=inst)
class Controller(wsgi.Controller):
""" The Shared IP Groups Controller for the Openstack API """
_serialization_metadata = {
'application/xml': {
'attributes': {
'sharedIpGroup': []}}}
def index(self, req):
raise NotImplementedError
""" Returns a list of Shared IP Groups for the user """
return dict(sharedIpGroups=[])
def show(self, req, id):
raise NotImplementedError
""" Shows in-depth information on a specific Shared IP Group """
return _translate_keys({})
def update(self, req, id):
raise NotImplementedError
""" You can't update a Shared IP Group """
raise faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, id):
raise NotImplementedError
""" Deletes a Shared IP Group """
raise faults.Fault(exc.HTTPNotFound())
def detail(self, req):
raise NotImplementedError
def detail(self, req, id):
""" Returns a complete list of Shared IP Groups """
return _translate_detail_keys({})
def create(self, req):
raise NotImplementedError
""" Creates a new Shared IP group """
raise faults.Fault(exc.HTTPNotFound())


@@ -74,6 +74,7 @@ class ComputeAPI(base.Base):
max_count=1, kernel_id=None, ramdisk_id=None,
display_name='', description='', key_name=None,
key_data=None, security_group='default',
availability_zone=None,
user_data=None,
generate_hostname=generate_default_hostname):
"""Create the number of instances requested if quote and
@@ -141,7 +142,8 @@ class ComputeAPI(base.Base):
'display_description': description,
'user_data': user_data or '',
'key_name': key_name,
'key_data': key_data}
'key_data': key_data,
'availability_zone': availability_zone}
elevated = context.elevated()
instances = []
@@ -257,6 +259,15 @@ class ComputeAPI(base.Base):
def get_instance(self, context, instance_id):
return self.db.instance_get_by_internal_id(context, instance_id)
def snapshot(self, context, instance_id, name):
"""Snapshot the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "snapshot_instance",
"args": {"instance_id": instance['id'], "name": name}})
def reboot(self, context, instance_id):
"""Reboot the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -284,6 +295,20 @@ class ComputeAPI(base.Base):
{"method": "unpause_instance",
"args": {"instance_id": instance['id']}})
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance["host"]
return rpc.call(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "get_diagnostics",
"args": {"instance_id": instance["id"]}})
def get_actions(self, context, instance_id):
"""Retrieve actions for the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
return self.db.instance_get_actions(context, instance["id"])
def suspend(self, context, instance_id):
"""suspend the instance with instance_id"""
instance = self.db.instance_get_by_internal_id(context, instance_id)


@@ -36,6 +36,7 @@ terminating it.
import datetime
import logging
import socket
from nova import exception
from nova import flags
@@ -51,6 +52,9 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
'Driver to use for controlling virtualization')
flags.DEFINE_string('stub_network', False,
'Stub network related code')
flags.DEFINE_string('console_host', socket.gethostname(),
'Console proxy host to use to connect to instances on '
'this host.')
class ComputeManager(manager.Manager):
@@ -85,6 +89,15 @@ class ComputeManager(manager.Manager):
state = power_state.NOSTATE
self.db.instance_set_state(context, instance_id, state)
def get_console_topic(self, context, **_kwargs):
"""Retrieves the console host for a project on this host
Currently this is just set in the flags for each compute
host."""
#TODO(mdragon): perhaps make this variable by console_type?
return self.db.queue_get_for(context,
FLAGS.console_topic,
FLAGS.console_host)
def get_network_topic(self, context, **_kwargs):
"""Retrieves the network host for a project on this host"""
# TODO(vish): This method should be memoized. This will make
@@ -229,6 +242,27 @@ class ComputeManager(manager.Manager):
self.driver.reboot(instance_ref)
self._update_state(context, instance_id)
@exception.wrap_exception
def snapshot_instance(self, context, instance_id, name):
"""Snapshot an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
#NOTE(sirp): update_state currently only refreshes the state field
# if we add is_snapshotting, we will need this refreshed too,
# potentially?
self._update_state(context, instance_id)
logging.debug(_('instance %s: snapshotting'), instance_ref['name'])
if instance_ref['state'] != power_state.RUNNING:
logging.warn(_('trying to snapshot a non-running '
'instance: %s (state: %s expected: %s)'),
instance_ref['internal_id'],
instance_ref['state'],
power_state.RUNNING)
self.driver.snapshot(instance_ref, name)
@exception.wrap_exception
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this server."""
@@ -301,6 +335,16 @@ class ComputeManager(manager.Manager):
instance_id,
result))
@exception.wrap_exception
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this server."""
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref["state"] == power_state.RUNNING:
logging.debug(_("instance %s: retrieving diagnostics"),
instance_ref["internal_id"])
return self.driver.get_diagnostics(instance_ref)
@exception.wrap_exception
def suspend_instance(self, context, instance_id):
"""suspend the instance with instance_id"""

nova/console/api.py (new file, 80 lines)

@@ -0,0 +1,80 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles ConsoleProxy API requests
"""
from nova import exception
from nova.db import base
from nova import flags
from nova import rpc
FLAGS = flags.FLAGS
class ConsoleAPI(base.Base):
"""API for spining up or down console proxy connections"""
def __init__(self, **kwargs):
super(ConsoleAPI, self).__init__(**kwargs)
def get_consoles(self, context, instance_internal_id):
instance = self.db.instance_get_by_internal_id(context,
instance_internal_id)
return self.db.console_get_all_by_instance(context, instance['id'])
def get_console(self, context, instance_internal_id, console_id):
return self.db.console_get(context, console_id, instance_internal_id)
def delete_console(self, context, instance_internal_id, console_id):
instance = self.db.instance_get_by_internal_id(context,
instance_internal_id)
console = self.db.console_get(context,
console_id,
instance['id'])
pool = console['pool']
rpc.cast(context,
self.db.queue_get_for(context,
FLAGS.console_topic,
pool['host']),
{"method": "remove_console",
"args": {"console_id": console['id']}})
def create_console(self, context, instance_internal_id):
instance = self.db.instance_get_by_internal_id(context,
instance_internal_id)
#NOTE(mdragon): If we wanted to return the console info
# here, we would need to do a call.
# They can just do an index later to fetch
# console info. I am not sure which is better
# here.
rpc.cast(context,
self._get_console_topic(context, instance['host']),
{"method": "add_console",
"args": {"instance_id": instance['id']}})
def _get_console_topic(self, context, instance_host):
topic = self.db.queue_get_for(context,
FLAGS.compute_topic,
instance_host)
return rpc.call(context,
topic,
{"method": "get_console_topic", "args": {'fake': 1}})


@@ -78,21 +78,15 @@ class ConsoleProxyManager(manager.Manager):
return console['id']
@exception.wrap_exception
def remove_console(self, context, instance_id, **_kwargs):
instance = self.db.instance_get(context, instance_id)
host = instance['host']
pool = self.get_pool_for_instance_host(context, host)
def remove_console(self, context, console_id, **_kwargs):
try:
console = self.db.console_get_by_pool_instance(context,
pool['id'],
instance_id)
console = self.db.console_get(context, console_id)
except exception.NotFound:
logging.debug(_('Tried to remove non-existent console in pool '
'%(pool_id)s for instance %(instance_id)s.' %
{'instance_id' : instance_id,
'pool_id' : pool['id']}))
logging.debug(_('Tried to remove non-existent console '
'%(console_id)s.') %
{'console_id' : console_id})
return
self.db.console_delete(context, console['id'])
self.db.console_delete(context, console_id)
self.driver.teardown_console(context, console)


@@ -27,6 +27,9 @@ The underlying driver is loaded as a :class:`LazyPluggable`.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/nova/nova.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from nova import exception
@@ -37,6 +40,8 @@ from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('db_backend', 'sqlalchemy',
'The backend to use for db')
flags.DEFINE_boolean('enable_new_services', True,
'Services to be added to the available pool on create')
IMPL = utils.LazyPluggable(FLAGS['db_backend'],
@@ -383,6 +388,11 @@ def instance_action_create(context, values):
return IMPL.instance_action_create(context, values)
def instance_get_actions(context, instance_id):
"""Get instance actions by instance id."""
return IMPL.instance_get_actions(context, instance_id)
###################
@@ -924,4 +934,12 @@ def console_get_by_pool_instance(context, pool_id, instance_id):
"""Get console entry for a given instance and pool."""
return IMPL.console_get_by_pool_instance(context, pool_id, instance_id)
def console_get_all_by_instance(context, instance_id):
"""Get consoles for a given instance."""
return IMPL.console_get_all_by_instance(context, instance_id)
def console_get(context, console_id, instance_id=None):
"""Get a specific console (possibly on a given instance)."""
return IMPL.console_get(context, console_id, instance_id)


@@ -19,6 +19,25 @@
"""
SQLAlchemy database backend
"""
import logging
import time
from sqlalchemy.exc import OperationalError
from nova import flags
from nova.db.sqlalchemy import models
models.register_models()
FLAGS = flags.FLAGS
for i in xrange(FLAGS.sql_max_retries):
if i > 0:
time.sleep(FLAGS.sql_retry_interval)
try:
models.register_models()
break
except OperationalError:
logging.exception(_("Data store is unreachable."
" Trying again in %d seconds.") % FLAGS.sql_retry_interval)


@@ -236,6 +236,8 @@ def service_get_by_args(context, host, binary):
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not FLAGS.enable_new_services:
service_ref.disabled = True
service_ref.save()
return service_ref
@@ -856,6 +858,18 @@ def instance_action_create(context, values):
return action_ref
@require_admin_context
def instance_get_actions(context, instance_id):
"""Return the actions associated to the given instance id"""
session = get_session()
actions = {}
for action in session.query(models.InstanceActions).\
filter_by(instance_id=instance_id).\
all():
actions[action.action] = action.error
return actions
###################
@@ -1943,4 +1957,25 @@ def console_get_by_pool_instance(context, pool_id, instance_id):
'pool_id': pool_id})
return result
def console_get_all_by_instance(context, instance_id):
session = get_session()
results = session.query(models.Console).\
filter_by(instance_id=instance_id).\
options(joinedload('pool')).\
all()
return results
def console_get(context, console_id, instance_id=None):
session = get_session()
query = session.query(models.Console).\
filter_by(id=console_id)
if instance_id:
query = query.filter_by(instance_id=instance_id)
result = query.options(joinedload('pool')).first()
if not result:
idesc = (_(" on instance %s") % instance_id) if instance_id else ""
raise exception.NotFound(_("No console with id %s%s") %
(console_id, idesc))
return result


@@ -22,7 +22,7 @@ SQLAlchemy models for nova data.
import datetime
from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, Float, String, schema
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
@@ -220,6 +220,8 @@ class Instance(BASE, NovaBase):
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
availability_zone = Column(String(255))
# User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
@@ -236,21 +238,6 @@ class Instance(BASE, NovaBase):
# 'shutdown', 'shutoff', 'crashed'])
class InstanceDiagnostics(BASE, NovaBase):
"""Represents a guest VM's diagnostics"""
__tablename__ = "instance_diagnostics"
id = Column(Integer, primary_key=True)
instance_id = Column(Integer, ForeignKey('instances.id'))
memory_available = Column(Float)
memory_free = Column(Float)
cpu_load = Column(Float)
disk_read = Column(Float)
disk_write = Column(Float)
net_tx = Column(Float)
net_rx = Column(Float)
class InstanceActions(BASE, NovaBase):
"""Represents a guest VM's actions and results"""
__tablename__ = "instance_actions"
@@ -452,7 +439,7 @@ class AuthToken(BASE, NovaBase):
"""
__tablename__ = 'auth_tokens'
token_hash = Column(String(255), primary_key=True)
user_id = Column(Integer)
user_id = Column(String(255))
server_manageent_url = Column(String(255))
storage_url = Column(String(255))
cdn_management_url = Column(String(255))
@@ -582,7 +569,7 @@ def register_models():
it will never need to be called explicitly elsewhere.
"""
from sqlalchemy import create_engine
models = (Service, Instance, InstanceDiagnostics, InstanceActions,
models = (Service, Instance, InstanceActions,
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,


@@ -36,7 +36,9 @@ def get_session(autocommit=True, expire_on_commit=False):
global _MAKER
if not _MAKER:
if not _ENGINE:
_ENGINE = create_engine(FLAGS.sql_connection, echo=False)
_ENGINE = create_engine(FLAGS.sql_connection,
pool_recycle=FLAGS.sql_idle_timeout,
echo=False)
_MAKER = (sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit))


@@ -212,6 +212,8 @@ DEFINE_list('region_list',
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('glance_port', 9292, 'glance port')
DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
@@ -240,6 +242,7 @@ DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
'default image to use, testing only')
DEFINE_string('default_instance_type', 'm1.small',
@@ -261,6 +264,11 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
DEFINE_string('sql_connection',
'sqlite:///$state_path/nova.sqlite',
'connection string for sql database')
DEFINE_string('sql_idle_timeout',
'3600',
'timeout for idle sql database connections')
DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
'Manager for compute')


@@ -24,6 +24,7 @@ import urlparse
import webob.exc
from nova.compute import api as compute_api
from nova import utils
from nova import flags
from nova import exception


@@ -37,6 +37,11 @@ class NoValidHost(exception.Error):
pass
class WillNotSchedule(exception.Error):
"""The specified host is not up or doesn't exist."""
pass
class Scheduler(object):
"""The base class that all Scheduler clases should inherit from."""


@@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
if instance_ref['availability_zone'] and context.is_admin:
zone, _x, host = instance_ref['availability_zone'].partition(':')
service = db.service_get_by_args(context.elevated(), host,
'nova-compute')
if not self.service_is_up(service):
raise driver.WillNotSchedule("Host %s is not alive" % host)
# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = datetime.datetime.utcnow()
db.instance_update(context, instance_id, {'host': host,
'scheduled_at': now})
return host
results = db.service_get_all_compute_sorted(context)
for result in results:
(service, instance_cores) = result
@@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)
if (':' in volume_ref['availability_zone']) and context.is_admin:
zone, _x, host = volume_ref['availability_zone'].partition(':')
service = db.service_get_by_args(context.elevated(), host,
'nova-volume')
if not self.service_is_up(service):
raise driver.WillNotSchedule("Host %s not available" % host)
# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = datetime.datetime.utcnow()
db.volume_update(context, volume_id, {'host': host,
'scheduled_at': now})
return host
results = db.service_get_all_volume_sorted(context)
for result in results:
(service, volume_gigabytes) = result

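A small sketch of the zone:host convention the scheduler above honors for admins (zone and host names are placeholders)::

    # An admin supplies availability_zone = "nova:node1"; the scheduler
    # splits it and, if the nova-compute service on node1 is alive,
    # pins the instance to that host instead of picking the least
    # loaded one.
    zone, _sep, host = 'nova:node1'.partition(':')
    assert (zone, host) == ('nova', 'node1')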

@@ -110,6 +110,12 @@ def stub_out_networking(stubs):
stubs.Set(nova.utils, 'get_my_ip', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
def snapshot(self, context, instance_id, name):
return 123
stubs.Set(nova.compute.api.ComputeAPI, 'snapshot', snapshot)
def stub_out_glance(stubs, initial_fixtures=[]):
class FakeParallaxClient:
@@ -213,6 +219,9 @@ class FakeAuthManager(object):
return v
return None
def get_project(self, pid):
return None
def get_user_from_access_key(self, key):
return FakeAuthManager.auth_data.get(key, None)


@@ -50,7 +50,7 @@ class BaseImageServiceTests(object):
'updated': None,
'created': None,
'status': None,
'serverId': None,
'instance_id': None,
'progress': None}
num_images = len(self.service.index(self.context))
@@ -67,7 +67,7 @@ class BaseImageServiceTests(object):
'updated': None,
'created': None,
'status': None,
'serverId': None,
'instance_id': None,
'progress': None}
num_images = len(self.service.index(self.context))
@@ -87,7 +87,7 @@ class BaseImageServiceTests(object):
'updated': None,
'created': None,
'status': None,
'serverId': None,
'instance_id': None,
'progress': None}
id = self.service.create(self.context, fixture)
@@ -105,13 +105,13 @@ class BaseImageServiceTests(object):
'updated': None,
'created': None,
'status': None,
'serverId': None,
'instance_id': None,
'progress': None},
{'name': 'test image 2',
'updated': None,
'created': None,
'status': None,
'serverId': None,
'instance_id': None,
'progress': None}]
num_images = len(self.service.index(self.context))
@@ -155,6 +155,7 @@ class GlanceImageServiceTest(unittest.TestCase,
def setUp(self):
self.stubs = stubout.StubOutForTesting()
fakes.stub_out_glance(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
service_class = 'nova.image.glance.GlanceImageService'
self.service = utils.import_object(service_class)
self.context = context.RequestContext(None, None)

View File

@@ -95,6 +95,10 @@ class ServersTest(unittest.TestCase):
fake_compute_api)
self.stubs.Set(nova.compute.api.ComputeAPI, 'resume',
fake_compute_api)
self.stubs.Set(nova.compute.api.ComputeAPI, "get_diagnostics",
fake_compute_api)
self.stubs.Set(nova.compute.api.ComputeAPI, "get_actions",
fake_compute_api)
self.allow_admin = FLAGS.allow_admin_api
def tearDown(self):
@@ -274,6 +278,18 @@ class ServersTest(unittest.TestCase):
res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status_int, 202)
def test_server_diagnostics(self):
req = webob.Request.blank("/v1.0/servers/1/diagnostics")
req.method = "GET"
res = req.get_response(nova.api.API("os"))
self.assertEqual(res.status_int, 404)
def test_server_actions(self):
req = webob.Request.blank("/v1.0/servers/1/actions")
req.method = "GET"
res = req.get_response(nova.api.API("os"))
self.assertEqual(res.status_int, 404)
def test_server_reboot(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},

View File

@@ -151,6 +151,14 @@ class ComputeTestCase(test.TestCase):
self.compute.reboot_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance_id = self._create_instance()
name = "myfakesnapshot"
self.compute.run_instance(self.context, instance_id)
self.compute.snapshot_instance(self.context, instance_id, name)
self.compute.terminate_instance(self.context, instance_id)
def test_console_output(self):
"""Make sure we can get console output from instance"""
instance_id = self._create_instance()

View File

@@ -120,15 +120,11 @@ class ConsoleTestCase(test.TestCase):
def test_remove_console(self):
instance_id = self._create_instance()
self.console.add_console(self.context, instance_id)
self.console.remove_console(self.context, instance_id)
console_id = self.console.add_console(self.context, instance_id)
self.console.remove_console(self.context, console_id)
instance = db.instance_get(self.context, instance_id)
pool = db.console_pool_get_by_host_type(self.context,
instance['host'],
self.console.host,
self.console.driver.console_type)
console_instances = [con['instance_id'] for con in pool.consoles]
self.assert_(instance_id not in console_instances)
self.assertRaises(exception.NotFound,
db.console_get,
self.context,
console_id)

View File

@@ -19,6 +19,8 @@
Tests For Scheduler
"""
import datetime
from nova import context
from nova import db
from nova import flags
@@ -33,6 +35,7 @@ from nova.scheduler import driver
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
flags.DECLARE('stub_network', 'nova.compute.manager')
class TestDriver(driver.Scheduler):
@@ -94,7 +97,7 @@ class SimpleDriverTestCase(test.TestCase):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
def _create_instance(self):
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
@@ -105,6 +108,7 @@ class SimpleDriverTestCase(test.TestCase):
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
inst['vcpus'] = 1
inst['availability_zone'] = kwargs.get('availability_zone', None)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
@@ -113,9 +117,33 @@ class SimpleDriverTestCase(test.TestCase):
vol['image_id'] = 'ami-test'
vol['reservation_id'] = 'r-fakeres'
vol['size'] = 1
vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']
def test_hosts_are_up(self):
def test_doesnt_report_disabled_hosts_as_up(self):
"""Ensures driver doesn't find hosts before they are enabled"""
# NOTE(vish): constructing service without create method
# because we are going to use it without queue
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
db.service_update(self.context, s2['id'], {'disabled': True})
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(0, len(hosts))
compute1.kill()
compute2.kill()
def test_reports_enabled_hosts_as_up(self):
"""Ensures driver can find the hosts that are up"""
# NOTE(vish): constructing service without create method
# because we are going to use it without queue
@@ -130,7 +158,7 @@ class SimpleDriverTestCase(test.TestCase):
FLAGS.compute_manager)
compute2.start()
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(len(hosts), 2)
self.assertEqual(2, len(hosts))
compute1.kill()
compute2.kill()
@@ -157,6 +185,63 @@ class SimpleDriverTestCase(test.TestCase):
compute1.kill()
compute2.kill()
def test_specific_host_gets_instance(self):
"""Ensures if you set availability_zone it launches on that zone"""
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
instance_id1 = self._create_instance()
compute1.run_instance(self.context, instance_id1)
instance_id2 = self._create_instance(availability_zone='nova:host1')
host = self.scheduler.driver.schedule_run_instance(self.context,
instance_id2)
self.assertEqual('host1', host)
compute1.terminate_instance(self.context, instance_id1)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
compute2.kill()
def test_wont_schedule_if_specified_host_is_down(self):
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
now = datetime.datetime.utcnow()
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
past = now - delta
db.service_update(self.context, s1['id'], {'updated_at': past})
instance_id2 = self._create_instance(availability_zone='nova:host1')
self.assertRaises(driver.WillNotSchedule,
self.scheduler.driver.schedule_run_instance,
self.context,
instance_id2)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
def test_will_schedule_on_disabled_host_if_specified(self):
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
instance_id2 = self._create_instance(availability_zone='nova:host1')
host = self.scheduler.driver.schedule_run_instance(self.context,
instance_id2)
self.assertEqual('host1', host)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
def test_too_many_cores(self):
"""Ensures we don't go over max cores"""
compute1 = service.Service('host1',

View File

@@ -22,6 +22,8 @@ Unit Tests for remote procedure calls using queue
import mox
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import rpc
@@ -72,6 +74,30 @@ class ServiceManagerTestCase(test.TestCase):
self.assertEqual(serv.test_method(), 'service')
class ServiceFlagsTestCase(test.TestCase):
def test_service_enabled_on_create_based_on_flag(self):
self.flags(enable_new_services=True)
host = 'foo'
binary = 'nova-fake'
app = service.Service.create(host=host, binary=binary)
app.start()
app.stop()
ref = db.service_get(context.get_admin_context(), app.service_id)
db.service_destroy(context.get_admin_context(), app.service_id)
self.assert_(not ref['disabled'])
def test_service_disabled_on_create_based_on_flag(self):
self.flags(enable_new_services=False)
host = 'foo'
binary = 'nova-fake'
app = service.Service.create(host=host, binary=binary)
app.start()
app.stop()
ref = db.service_get(context.get_admin_context(), app.service_id)
db.service_destroy(context.get_admin_context(), app.service_id)
self.assert_(ref['disabled'])
class ServiceTestCase(test.TestCase):
"""Test cases for Services"""

View File

@@ -33,6 +33,7 @@ flags.DECLARE('instances_path', 'nova.compute.manager')
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
libvirt_conn._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',

View File

@@ -29,9 +29,9 @@ from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
from nova.tests.db import fakes
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
FLAGS = flags.FLAGS
@@ -47,9 +47,9 @@ class XenAPIVolumeTestCase(test.TestCase):
FLAGS.target_host = '127.0.0.1'
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
fakes.stub_out_db_instance_api(self.stubs)
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
fake.reset()
xenapi_fake.reset()
self.values = {'name': 1, 'id': 1,
'project_id': 'fake',
'user_id': 'fake',
@@ -83,7 +83,7 @@ class XenAPIVolumeTestCase(test.TestCase):
label = 'SR-%s' % vol['ec2_id']
description = 'Test-SR'
sr_ref = helper.create_iscsi_storage(session, info, label, description)
srs = fake.get_all('SR')
srs = xenapi_fake.get_all('SR')
self.assertEqual(sr_ref, srs[0])
db.volume_destroy(context.get_admin_context(), vol['id'])
@@ -107,17 +107,17 @@ class XenAPIVolumeTestCase(test.TestCase):
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.values)
fake.create_vm(instance.name, 'Running')
xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(instance.name, volume['ec2_id'],
'/dev/sdc')
def check():
# check that the VM has a VBD attached to it
# Get XenAPI reference for the VM
vms = fake.get_all('VM')
vms = xenapi_fake.get_all('VM')
# Get XenAPI record for VBD
vbds = fake.get_all('VBD')
vbd = fake.get_record('VBD', vbds[0])
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vms[0])
@@ -130,7 +130,7 @@ class XenAPIVolumeTestCase(test.TestCase):
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.values)
fake.create_vm(instance.name, 'Running')
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(Exception,
conn.attach_volume,
instance.name,
@@ -156,41 +156,70 @@ class XenAPIVMTestCase(test.TestCase):
self.stubs = stubout.StubOutForTesting()
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
fake.reset()
fakes.stub_out_db_instance_api(self.stubs)
fake.create_network('fake', FLAGS.flat_network_bridge)
xenapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.get_connection(False)
def test_list_instances_0(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
conn = xenapi_conn.get_connection(False)
instances = conn.list_instances()
instances = self.conn.list_instances()
self.assertEquals(instances, [])
def test_spawn(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
values = {'name': 1, 'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
}
conn = xenapi_conn.get_connection(False)
instance = db.instance_create(values)
conn.spawn(instance)
def test_get_diagnostics(self):
instance = self._create_instance()
self.conn.get_diagnostics(instance)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
name = "MySnapshot"
template_vm_ref = self.conn.snapshot(instance, name)
def ensure_vm_was_torn_down():
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEquals(vm_labels, [1])
def ensure_vbd_was_torn_down():
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEquals(vbd_labels, [1])
def ensure_vdi_was_torn_down():
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assert_(not name_label.endswith('snapshot'))
def check():
instances = conn.list_instances()
ensure_vm_was_torn_down()
ensure_vbd_was_torn_down()
ensure_vdi_was_torn_down()
check()
def test_spawn(self):
instance = self._create_instance()
def check():
instances = self.conn.list_instances()
self.assertEquals(instances, [1])
# Get Nova record for VM
vm_info = conn.get_info(1)
vm_info = self.conn.get_info(1)
# Get XenAPI record for VM
vms = fake.get_all('VM')
vm = fake.get_record('VM', vms[0])
vms = xenapi_fake.get_all('VM')
vm = xenapi_fake.get_record('VM', vms[0])
# Check that m1.large above turned into the right thing.
instance_type = instance_types.INSTANCE_TYPES['m1.large']
@@ -218,3 +247,19 @@ class XenAPIVMTestCase(test.TestCase):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
self.stubs.UnsetAll()
def _create_instance(self):
"""Creates and spawns a test instance"""
values = {
'name': 1,
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff'}
instance = db.instance_create(values)
self.conn.spawn(instance)
return instance

View File

@@ -19,6 +19,54 @@
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vm_utils
def stubout_instance_snapshot(stubs):
@classmethod
def fake_fetch_image(cls, session, instance_id, image, user, project,
type):
# Stubout wait_for_task
def fake_wait_for_task(self, id, task):
class FakeEvent:
def send(self, value):
self.rv = value
def wait(self):
return self.rv
done = FakeEvent()
self._poll_task(id, task, done)
rv = done.wait()
return rv
stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
fake_wait_for_task)
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record
sr_ref = "fakesr"
vdi_ref = create_vdi(name_label=name_label, read_only=False,
sr_ref=sr_ref, sharable=False)
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
vdi_uuid = vdi_rec['uuid']
return vdi_uuid
stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
def fake_parse_xmlrpc_value(val):
return val
stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
#TODO(sirp): Should we actually fake out the data here
return "fakeparent"
stubs.Set(vm_utils, 'wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
def stubout_session(stubs, cls):
@@ -63,6 +111,24 @@ class FakeSessionForVMTests(fake.SessionBase):
vm['is_a_template'] = False
vm['is_control_domain'] = False
def VM_snapshot(self, session_ref, vm_ref, label):
status = "Running"
template_vm_ref = fake.create_vm(label, status, is_a_template=True,
is_control_domain=False)
sr_ref = "fakesr"
template_vdi_ref = fake.create_vdi(label, read_only=True,
sr_ref=sr_ref, sharable=False)
template_vbd_ref = fake.create_vbd(template_vm_ref, template_vdi_ref)
return template_vm_ref
def VDI_destroy(self, session_ref, vdi_ref):
fake.destroy_vdi(vdi_ref)
def VM_destroy(self, session_ref, vm_ref):
fake.destroy_vm(vm_ref)
class FakeSessionForVolumeTests(fake.SessionBase):
""" Stubs out a XenAPISession for Volume tests """

View File

@@ -112,6 +112,20 @@ class FakeConnection(object):
self.instances[instance.name] = fake_instance
fake_instance._state = power_state.RUNNING
def snapshot(self, instance, name):
"""
Snapshots the specified instance.
The given parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name.
The second parameter is the name of the snapshot.
The work will be done asynchronously. This function returns a
Deferred that allows the caller to detect when it is complete.
"""
pass
def reboot(self, instance):
"""
Reboot the specified instance.
@@ -202,6 +216,9 @@ class FakeConnection(object):
'num_cpu': 2,
'cpu_time': 0}
def get_diagnostics(self, instance_name):
pass
def list_disks(self, instance_name):
"""
Return the IDs of all the virtual disks attached to the specified

View File

@@ -58,10 +58,9 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
from Cheetah.Template import Template
libvirt = None
libxml2 = None
Template = None
FLAGS = flags.FLAGS
@@ -69,6 +68,9 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
flags.DEFINE_string('injected_network_template',
utils.abspath('virt/interfaces.template'),
'Template file for injected network')
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.xml.template'),
'Libvirt XML Template')
@@ -88,15 +90,26 @@ flags.DEFINE_bool('allow_project_net_traffic',
def get_connection(read_only):
# These are loaded late so that there's no need to install these
# libraries when not using libvirt.
# Cheetah is separate because the unit tests want to load Cheetah,
# but not libvirt.
global libvirt
global libxml2
if libvirt is None:
libvirt = __import__('libvirt')
if libxml2 is None:
libxml2 = __import__('libxml2')
_late_load_cheetah()
return LibvirtConnection(read_only)
def _late_load_cheetah():
global Template
if Template is None:
t = __import__('Cheetah.Template', globals(), locals(), ['Template'],
-1)
Template = t.Template
def _get_net_and_mask(cidr):
net = IPy.IP(cidr)
return str(net.net()), str(net.netmask())
@@ -246,6 +259,13 @@ class LibvirtConnection(object):
raise exception.NotFound(_("No disk at %s") % mount_device)
virt_dom.detachDevice(xml)
@exception.wrap_exception
def snapshot(self, instance, name):
""" Create snapshot from a running VM instance """
raise NotImplementedError(
_("Instance snapshotting is not supported for libvirt"
"at this time"))
@exception.wrap_exception
def reboot(self, instance):
self.destroy(instance, False)
@@ -567,6 +587,9 @@ class LibvirtConnection(object):
'num_cpu': num_cpu,
'cpu_time': cpu_time}
def get_diagnostics(self, instance_name):
raise exception.APIError("diagnostics are not supported for libvirt")
def get_disks(self, instance_name):
"""
Note that this function takes an instance name, not an Instance, so

View File

@@ -55,6 +55,8 @@ import datetime
import logging
import uuid
from pprint import pformat
from nova import exception
@@ -64,6 +66,10 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
_db_content = {}
def log_db_contents(msg=None):
logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
def reset():
for c in _CLASSES:
_db_content[c] = {}
@@ -93,6 +99,24 @@ def create_vm(name_label, status,
})
def destroy_vm(vm_ref):
vm_rec = _db_content['VM'][vm_ref]
vbd_refs = vm_rec['VBDs']
for vbd_ref in vbd_refs:
destroy_vbd(vbd_ref)
del _db_content['VM'][vm_ref]
def destroy_vbd(vbd_ref):
del _db_content['VBD'][vbd_ref]
def destroy_vdi(vdi_ref):
del _db_content['VDI'][vdi_ref]
def create_vdi(name_label, read_only, sr_ref, sharable):
return _create_object('VDI', {
'name_label': name_label,
@@ -109,6 +133,23 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
})
def create_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
"""Create backref from VM to VBD when VBD is created"""
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'] = [vbd_ref]
vm_name_label = _db_content['VM'][vm_ref]['name_label']
vbd_rec['vm_name_label'] = vm_name_label
def create_pbd(config, sr_ref, attached):
return _create_object('PBD', {
'device-config': config,
@@ -277,11 +318,12 @@ class SessionBase(object):
self._check_arg_count(params, 2)
return get_record(cls, params[1])
if (func == 'get_by_name_label' or
func == 'get_by_uuid'):
if func in ('get_by_name_label', 'get_by_uuid'):
self._check_arg_count(params, 2)
return_singleton = (func == 'get_by_uuid')
return self._get_by_field(
_db_content[cls], func[len('get_by_'):], params[1])
_db_content[cls], func[len('get_by_'):], params[1],
return_singleton=return_singleton)
if len(params) == 2:
field = func[len('get_'):]
@@ -324,6 +366,13 @@ class SessionBase(object):
(cls, _) = name.split('.')
ref = is_sr_create and \
_create_sr(cls, params) or _create_object(cls, params[1])
# Call hook to provide any fixups needed (ex. creating backrefs)
try:
globals()["after_%s_create" % cls](ref, params[1])
except KeyError:
pass
obj = get_record(cls, ref)
# Add RO fields
@@ -359,11 +408,18 @@ class SessionBase(object):
raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
expected, actual])
def _get_by_field(self, recs, k, v):
def _get_by_field(self, recs, k, v, return_singleton):
result = []
for ref, rec in recs.iteritems():
if rec.get(k) == v:
result.append(ref)
if return_singleton:
try:
return result[0]
except IndexError:
return None
return result

View File

@@ -20,11 +20,14 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import logging
import pickle
import urllib
from xml.dom import minidom
from eventlet import event
from nova import exception
from nova import flags
from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
@@ -204,7 +207,54 @@ class VMHelper(HelperBase):
return vif_ref
@classmethod
def fetch_image(cls, session, image, user, project, type):
def create_snapshot(cls, session, instance_id, vm_ref, label):
""" Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
logging.debug(_("Snapshotting VM %s with label '%s'..."),
vm_ref, label)
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
sr_ref = vm_vdi_rec["SR"]
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
template_vm_ref = session.wait_for_task(instance_id, task)
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
vm_ref)
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
#TODO(sirp): we need to assert only one parent, not parents two deep
return template_vm_ref, [template_vdi_uuid, parent_uuid]
@classmethod
def upload_image(cls, session, instance_id, vdi_uuids, image_name):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
logging.debug(_("Asking xapi to upload %s as '%s'"),
vdi_uuids, image_name)
params = {'vdi_uuids': vdi_uuids,
'image_name': image_name,
'glance_host': FLAGS.glance_host,
'glance_port': FLAGS.glance_port}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'put_vdis', kwargs)
session.wait_for_task(instance_id, task)
@classmethod
def fetch_image(cls, session, instance_id, image, user, project, type):
"""
type is interpreted as an ImageType instance
"""
@@ -223,9 +273,7 @@ class VMHelper(HelperBase):
if type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
#FIXME(armando): find a solution to missing instance_id
#with Josh Kearney
uuid = session.wait_for_task(0, task)
uuid = session.wait_for_task(instance_id, task)
return uuid
@classmethod
@@ -299,6 +347,10 @@ class VMHelper(HelperBase):
try:
host = session.get_xenapi_host()
host_ip = session.get_xenapi().host.get_record(host)["address"]
except (cls.XenAPI.Failure, KeyError) as e:
return {"Unable to retrieve diagnostics": e}
try:
diags = {}
xml = get_rrd(host_ip, record["uuid"])
if xml:
@@ -325,3 +377,87 @@ def get_rrd(host, uuid):
return xml.read()
except IOError:
return None
#TODO(sirp): This code comes from XS5.6 pluginlib.py, we should refactor to
# use that implementation
def get_vhd_parent(session, vdi_rec):
"""
Returns the VHD parent of the given VDI record, as a (ref, rec) pair.
Returns None if we're at the root of the tree.
"""
if 'vhd-parent' in vdi_rec['sm_config']:
parent_uuid = vdi_rec['sm_config']['vhd-parent']
#NOTE(sirp): changed xenapi -> get_xenapi()
parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
#NOTE(sirp): changed log -> logging
logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
return parent_ref, parent_rec
else:
return None
def get_vhd_parent_uuid(session, vdi_ref):
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
ret = get_vhd_parent(session, vdi_rec)
if ret:
parent_ref, parent_rec = ret
return parent_rec["uuid"]
else:
return None
def scan_sr(session, instance_id, sr_ref):
logging.debug(_("Re-scanning SR %s"), sr_ref)
task = session.call_xenapi('Async.SR.scan', sr_ref)
session.wait_for_task(instance_id, task)
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
""" Spin until the parent VHD is coalesced into its parent VHD
Before coalesce:
* original_parent_vhd
* parent_vhd
snapshot
Atter coalesce:
* parent_vhd
snapshot
"""
#TODO(sirp): we need to timeout this req after a while
def _poll_vhds():
scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
logging.debug(
_("Parent %s doesn't match original parent %s, "
"waiting for coalesce..."),
parent_uuid, original_parent_uuid)
else:
done.send(parent_uuid)
done = event.Event()
loop = utils.LoopingCall(_poll_vhds)
loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True)
parent_uuid = done.wait()
loop.stop()
return parent_uuid
def get_vdi_for_vm_safely(session, vm_ref):
vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
if vdi_refs is None:
raise Exception(_("No VDIs found for VM %s") % vm_ref)
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
raise Exception(_("Unexpected number of VDIs (%s) found for "
"VM %s") % (num_vdis, vm_ref))
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
return vdi_ref, vdi_rec

View File

@@ -70,7 +70,7 @@ class VMOps(object):
disk_image_type = ImageType.DISK
else:
disk_image_type = ImageType.DISK_RAW
vdi_uuid = VMHelper.fetch_image(self._session,
vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
instance.image_id, user, project, disk_image_type)
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
#Have a look at the VDI and see if it has a PV kernel
@@ -79,11 +79,11 @@ class VMOps(object):
pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session,
kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
ramdisk = None
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(self._session,
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
vm_ref = VMHelper.create_vm(self._session,
instance, kernel, ramdisk, pv_kernel)
@@ -120,6 +120,52 @@ class VMOps(object):
timer.f = _wait_for_boot
return timer.start(interval=0.5, now=True)
def snapshot(self, instance, name):
""" Create snapshot from a running VM instance
:param instance: instance to be snapshotted
:param name: name/label to be given to the snapshot
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
a 'base-copy' VDI. The base_copy is immutable and may be chained
with other base_copies. If chained, the base_copies
coalesce together, so, we must wait for this coalescing to occur to
get a stable representation of the data on disk.
3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
that will bundle the VHDs together and then push the bundle into
Glance.
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
logging.debug(_("Starting snapshot for VM %s"), instance)
vm_ref = VMHelper.lookup(self._session, instance.name)
label = "%s-snapshot" % instance.name
try:
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance.id, vm_ref, label)
except self.XenAPI.Failure, exc:
logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc)
return
try:
# call plugin to ship snapshot off to glance
VMHelper.upload_image(
self._session, instance.id, template_vdi_uuids, name)
finally:
self._destroy(instance, template_vm_ref, shutdown=False)
logging.debug(_("Finished snapshot and upload for VM %s"), instance)
def reboot(self, instance):
"""Reboot VM instance"""
instance_name = instance.name
@@ -133,31 +179,36 @@ class VMOps(object):
def destroy(self, instance):
"""Destroy VM instance"""
vm = VMHelper.lookup(self._session, instance.name)
return self._destroy(instance, vm, shutdown=True)
def _destroy(self, instance, vm, shutdown=True):
""" Destroy VM instance """
if vm is None:
# Don't complain, just return. This lets us clean up instances
# that have already disappeared from the underlying platform.
return
# Get the VDIs related to the VM
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
try:
task = self._session.call_xenapi('Async.VM.hard_shutdown',
vm)
self._session.wait_for_task(instance.id, task)
except XenAPI.Failure, exc:
logging.warn(exc)
if shutdown:
try:
task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
logging.warn(exc)
# Disk clean-up
if vdis:
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
except XenAPI.Failure, exc:
except self.XenAPI.Failure, exc:
logging.warn(exc)
# VM Destroy
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
except XenAPI.Failure, exc:
except self.XenAPI.Failure, exc:
logging.warn(exc)
def _wait_with_callback(self, instance_id, task, callback):
@@ -217,11 +268,12 @@ class VMOps(object):
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_info(rec)
def get_diagnostics(self, instance_id):
def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
vm = VMHelper.lookup(self._session, instance_id)
vm = VMHelper.lookup(self._session, instance.name)
if vm is None:
raise exception.NotFound(_("Instance not found %s") % instance_id)
raise exception.NotFound(_("Instance not found %s") %
instance.name)
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_diagnostics(self._session, rec)

View File

@@ -84,6 +84,10 @@ flags.DEFINE_float('xenapi_task_poll_interval',
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
5.0,
'The interval used for polling of coalescing vhds.'
' Used only if connection_type=xenapi.')
flags.DEFINE_string('target_host',
None,
'iSCSI Target Host')
@@ -132,6 +136,10 @@ class XenAPIConnection(object):
"""Create VM instance"""
self._vmops.spawn(instance)
def snapshot(self, instance, name):
""" Create snapshot from a running VM instance """
self._vmops.snapshot(instance, name)
def reboot(self, instance):
"""Reboot VM instance"""
self._vmops.reboot(instance)
@@ -160,9 +168,9 @@ class XenAPIConnection(object):
"""Return data about VM instance"""
return self._vmops.get_info(instance_id)
def get_diagnostics(self, instance_id):
def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
return self._vmops.get_diagnostics(instance_id)
return self._vmops.get_diagnostics(instance)
def get_console_output(self, instance):
"""Return snapshot of console"""
@@ -240,8 +248,8 @@ class XenAPISession(object):
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
action = dict(
id=int(id),
action=name,
instance_id=int(id),
action=name[0:255], # Ensure action is never > 255
error=None)
if status == "pending":
return

View File

@@ -270,7 +270,7 @@ class Serializer(object):
needed to serialize a dictionary to that type.
"""
self.metadata = metadata or {}
req = webob.Request(environ)
req = webob.Request.blank('', environ)
suffix = req.path_info.split('.')[-1].lower()
if suffix == 'json':
self.handler = self._to_json

View File

@@ -0,0 +1,144 @@
Multi Tenancy Networking Protections in XenServer
=================================================
The purpose of the vif_rules script is to allow multi-tenancy on a XenServer
host. In a multi-tenant cloud environment a host machine needs to be able to
enforce network isolation amongst guest instances, at both layer two and layer
three. The rules prevent guests from taking and using unauthorized IP addresses,
sniffing other guests' traffic, and launching ARP poisoning attacks. The current
revision only supports IPv4; IPv6 support will be added in the future.
Kernel Requirements
===================
- physdev module
- arptables support
- ebtables support
- iptables support
If the kernel doesn't support these, you will need to obtain the source RPMs for
the proper version of XenServer and recompile the dom0 kernel.
XenServer Requirements (32-bit dom0)
====================================
- arptables 32-bit rpm
- ebtables 32-bit rpm
- python-simplejson
XenServer Environment Specific Notes
====================================
- XenServer 5.5 U1 based on the 2.6.18 kernel didn't include physdev module
support. Support for this had to be recompiled into the kernel.
- XenServer 5.6 based on the 2.6.27 kernel didn't include physdev, ebtables, or
arptables.
- XenServer 5.6 FP1 didn't include physdev, ebtables, or arptables but they do
have a Cloud Supplemental pack available to partners which swaps out the
kernels for kernels that support the networking rules.
How it works - tl;dr
====================
iptables, ebtables, and arptables drop rules are applied to all forward chains
on the host. These are applied at boot time with an init script. They ensure
all forwarded packets are dropped by default. Allow rules are then applied to
the instances to ensure they have permission to talk on the internet.
How it works - Long
===================
Any time an unprivileged domain (domU) is started it gets a new, unique domain
id (dom_id). This dom_id is used in a number of places, one of which is the name
of the virtual interface (vif) assigned to it. The vifs are attached to the
bridge that is attached to the physical network. For instance, if you had a
public bridge attached to eth0 and your domain id was 5, your vif would be
vif5.0.
The networking rules are applied to the VIF directly so they apply at the lowest
level of the networking stack. Because the VIF changes along with the domain id
on any start, stop, or reboot of the instance, the rules need to be removed and
re-added any time that occurs.
Because the dom_id can change often, the vif_rules script is hooked into the
/etc/xensource/scripts/vif script that gets called anytime an instance is
started, or stopped, which includes pauses and resumes.
Examples of the rules run on the host at boot:
iptables -P FORWARD DROP
iptables -A FORWARD -m physdev --physdev-in eth0 -j ACCEPT
ebtables -P FORWARD DROP
ebtables -A FORWARD -o eth0 -j ACCEPT
arptables -P FORWARD DROP
arptables -A FORWARD --opcode Request --in-interface eth0 -j ACCEPT
arptables -A FORWARD --opcode Reply --in-interface eth0 -j ACCEPT
Examples of the rules that are run on each instance state change:
iptables -A FORWARD -m physdev --physdev-in vif1.0 -s 10.1.135.22/32 -j ACCEPT
arptables -A FORWARD --opcode Request --in-interface "vif1.0" \
--source-ip 10.1.135.22 -j ACCEPT
arptables -A FORWARD --opcode Reply --in-interface "vif1.0" \
--source-ip 10.1.135.22 --source-mac 9e:6e:cc:19:7f:fe -j ACCEPT
ebtables -A FORWARD -p 0806 -o vif1.0 --arp-ip-dst 10.1.135.22 -j ACCEPT
ebtables -A FORWARD -p 0800 -o vif1.0 --ip-dst 10.1.135.22 -j ACCEPT
ebtables -I FORWARD 1 -s ! 9e:6e:cc:19:7f:fe -i vif1.0 -j DROP
Typically when you see a vif, it'll look like vif<domain id>.<network bridge>.
vif2.1 for example would be domain 2 on the second interface.
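As a rough illustration (not part of the patch itself), the mapping from dom_id
and network label to vif name that vif_rules.py below relies on looks roughly
like this:

def vif_name(dom_id, network_label):
    # The public network is assumed to be on the first device (vifN.0) and the
    # private network on the second (vifN.1), matching the selection made in
    # vif_rules.py later in this commit.
    if network_label == "public":
        device = 0
    else:
        device = 1
    return "vif%s.%s" % (dom_id, device)

# vif_name(5, "public")  -> "vif5.0"
# vif_name(2, "private") -> "vif2.1"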
The vif_rules.py script needs to pull information about the IPs and MAC
addresses assigned to the instance. The current implementation assumes that
information is written into the VM record's xenstore-data key as a JSON string.
The vif_rules.py script parses that JSON to determine the IPs and MAC addresses
to protect.
An example format is given below:
# xe vm-param-get uuid=<uuid> param-name=xenstore-data
xenstore-data (MRW):
vm-data/networking/4040fa7292e4:
{"label": "public",
"ips": [{"netmask":"255.255.255.0",
"enabled":"1",
"ip":"173.200.100.10"}],
"mac":"40:40:fa:72:92:e4",
"gateway":"173.200.100.1",
"vm_id":"123456",
"dns":["72.3.128.240","72.3.128.241"]};
vm-data/networking/40402321c9b8:
{"label":"private",
"ips":[{"netmask":"255.255.224.0",
"enabled":"1",
"ip":"10.177.10.10"}],
"routes":[{"route":"10.176.0.0",
"netmask":"255.248.0.0",
"gateway":"10.177.10.1"},
{"route":"10.191.192.0",
"netmask":"255.255.192.0",
"gateway":"10.177.10.1"}],
"mac":"40:40:23:21:c9:b8"}
The key is used for two purposes. First, the vif_rules.py script reads it and,
after parsing the JSON, applies the rules that are needed. Second, because it is
put into the xenstore-data field, the xenstore is populated with this data on
boot. This allows a guest agent to read out data about the instance and apply
configuration as needed.
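A guest agent reading that data back out might look roughly like the sketch
below. It assumes the xenstore-read and xenstore-ls utilities are available
inside the guest and that, from within the guest, vm-data/networking resolves to
the guest's own domain path, as described above.

import subprocess
import simplejson as json

def _xenstore(cmd, path):
    proc = subprocess.Popen([cmd, path], stdout=subprocess.PIPE)
    return proc.communicate()[0]

def guest_networking():
    # Each child key of vm-data/networking is a MAC address whose value is
    # the JSON blob shown in the example above.
    listing = _xenstore("xenstore-ls", "vm-data/networking")
    macs = [line.split("=")[0].strip()
            for line in listing.splitlines() if line.strip()]
    return [json.loads(_xenstore("xenstore-read",
                                 "vm-data/networking/%s" % mac).strip())
            for mac in macs]

# for net in guest_networking():
#     print net["label"], [ip["ip"] for ip in net["ips"]]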
Installation
============
- Copy host-rules into /etc/init.d/ and make sure to chmod +x host-rules.
- Run 'chkconfig host-rules on' to add the init script to start up.
- Copy vif_rules.py into /etc/xensource/scripts
- Patch /etc/xensource/scripts/vif using the supplied patch file. It may vary
for different versions of XenServer but it should be fairly self-explanatory.
It calls the vif_rules.py script on domain creation and teardown.
- Run '/etc/init.d/host-rules start' to start up the host based rules.
- The instance rules will then fire on creation of the VM as long as the correct
JSON is in place.
- You can check to see if the rules are in place with: iptables --list,
arptables --list, or ebtables --list

View File

@@ -0,0 +1,106 @@
#!/bin/bash
#
# host-rules Start/Stop the networking host rules
#
# chkconfig: 2345 85 15
# description: Networking Host Rules for Multi Tenancy Protections
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
IPTABLES=/sbin/iptables
EBTABLES=/sbin/ebtables
ARPTABLES=/sbin/arptables
iptables-up()
{
$IPTABLES -P FORWARD DROP
$IPTABLES -A FORWARD -m physdev --physdev-in eth0 -j ACCEPT
$IPTABLES -A FORWARD -m physdev --physdev-in eth1 -j ACCEPT
}
ebtables-up()
{
$EBTABLES -P FORWARD DROP
$EBTABLES -A FORWARD -o eth0 -j ACCEPT
$EBTABLES -A FORWARD -o eth1 -j ACCEPT
}
arptables-up()
{
$ARPTABLES -P FORWARD DROP
$ARPTABLES -A FORWARD --opcode Request --in-interface eth0 -j ACCEPT
$ARPTABLES -A FORWARD --opcode Reply --in-interface eth0 -j ACCEPT
$ARPTABLES -A FORWARD --opcode Request --in-interface eth1 -j ACCEPT
$ARPTABLES -A FORWARD --opcode Reply --in-interface eth1 -j ACCEPT
}
iptables-down()
{
$IPTABLES -P FORWARD ACCEPT
$IPTABLES -D FORWARD -m physdev --physdev-in eth0 -j ACCEPT
$IPTABLES -D FORWARD -m physdev --physdev-in eth1 -j ACCEPT
}
ebtables-down()
{
$EBTABLES -P FORWARD ACCEPT
$EBTABLES -D FORWARD -o eth0 -j ACCEPT
$EBTABLES -D FORWARD -o eth1 -j ACCEPT
}
arptables-down()
{
$ARPTABLES -P FORWARD ACCEPT
$ARPTABLES -D FORWARD --opcode Request --in-interface eth0 -j ACCEPT
$ARPTABLES -D FORWARD --opcode Reply --in-interface eth0 -j ACCEPT
$ARPTABLES -D FORWARD --opcode Request --in-interface eth1 -j ACCEPT
$ARPTABLES -D FORWARD --opcode Reply --in-interface eth1 -j ACCEPT
}
start()
{
iptables-up
ebtables-up
arptables-up
}
stop()
{
iptables-down
ebtables-down
arptables-down
}
case "$1" in
start)
start
RETVAL=$?
;;
stop)
stop
RETVAL=$?
;;
restart)
stop
start
RETVAL=$?
;;
*)
echo $"Usage: $0 {start|stop|restart}"
exit 1
;;
esac
exit $RETVAL

View File

@@ -0,0 +1,22 @@
--- vif 2010-12-20 16:39:46.000000000 +0000
+++ vif_modified 2010-11-19 23:24:37.000000000 +0000
@@ -213,6 +213,7 @@
# xs-xen.pq.hq:91e986b8e49f netback-wait-for-hotplug
xenstore-write "/local/domain/0/backend/vif/${DOMID}/${DEVID}/hotplug-status" "connected"
+ python /etc/xensource/scripts/vif_rules.py ${DOMID} online 2>&1 > /dev/null
fi
;;
@@ -224,9 +225,11 @@
remove)
if [ "${TYPE}" = "vif" ] ;then
+ python /etc/xensource/scripts/vif_rules.py ${DOMID} offline 2>&1 > /dev/null
xenstore-rm "${HOTPLUG}/hotplug"
fi
logger -t scripts-vif "${dev} has been removed"
remove_from_bridge
;;
esac
+

View File

@@ -0,0 +1,119 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This script is used to configure iptables, ebtables, and arptables rules on
XenServer hosts.
"""
import os
import subprocess
import sys
# This is written to Python 2.4, since that is what is available on XenServer
import simplejson as json
def main(dom_id, command, only_this_vif=None):
xsls = execute("/usr/bin/xenstore-ls /local/domain/%s/vm-data/networking" \
% dom_id, True)
macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
for mac in macs:
xsr = "/usr/bin/xenstore-read /local/domain/%s/vm-data/networking/%s"
xsread = execute(xsr % (dom_id, mac), True)
data = json.loads(xsread)
for ip in data['ips']:
if data["label"] == "public":
vif = "vif%s.0" % dom_id
else:
vif = "vif%s.1" % dom_id
if (only_this_vif is None) or (vif == only_this_vif):
params = dict(IP=ip['ip'], VIF=vif, MAC=data['mac'])
apply_ebtables_rules(command, params)
apply_arptables_rules(command, params)
apply_iptables_rules(command, params)
def execute(command, return_stdout=False):
devnull = open(os.devnull, 'w')
proc = subprocess.Popen(command, shell=True, close_fds=True,
stdout=subprocess.PIPE, stderr=devnull)
devnull.close()
if return_stdout:
return proc.stdout.read()
else:
return None
# A note about adding rules:
# Whenever we add any rule to iptables, arptables or ebtables we first
# delete the same rule to ensure the rule only exists once.
def apply_iptables_rules(command, params):
iptables = lambda rule: execute("/sbin/iptables %s" % rule)
iptables("-D FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
-j ACCEPT" % params)
if command == 'online':
iptables("-A FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
-j ACCEPT" % params)
def apply_arptables_rules(command, params):
arptables = lambda rule: execute("/sbin/arptables %s" % rule)
arptables("-D FORWARD --opcode Request --in-interface %(VIF)s \
--source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
arptables("-D FORWARD --opcode Reply --in-interface %(VIF)s \
--source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
if command == 'online':
arptables("-A FORWARD --opcode Request --in-interface %(VIF)s \
--source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
arptables("-A FORWARD --opcode Reply --in-interface %(VIF)s \
--source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
def apply_ebtables_rules(command, params):
ebtables = lambda rule: execute("/sbin/ebtables %s" % rule)
ebtables("-D FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s -j ACCEPT" %
params)
ebtables("-D FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s -j ACCEPT" %
params)
if command == 'online':
ebtables("-A FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s \
-j ACCEPT" % params)
ebtables("-A FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s \
-j ACCEPT" % params)
ebtables("-D FORWARD -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
if command == 'online':
ebtables("-I FORWARD 1 -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
if __name__ == "__main__":
if len(sys.argv) < 3:
print "usage: %s dom_id online|offline [vif]" % \
os.path.basename(sys.argv[0])
sys.exit(1)
else:
dom_id, command = sys.argv[1:3]
vif = len(sys.argv) == 4 and sys.argv[3] or None
main(dom_id, command, vif)

View File

@@ -0,0 +1,132 @@
#!/usr/bin/env python
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# XenAPI plugin for putting images into glance
#
import base64
import errno
import hmac
import httplib
import os
import os.path
import pickle
import sha
import subprocess
import time
import urlparse
import XenAPIPlugin
#FIXME(sirp): should this use pluginlib from 5.6?
from pluginlib_nova import *
configure_logging('glance')
CHUNK_SIZE = 8192
FILE_SR_PATH = '/var/run/sr-mount'
def put_vdis(session, args):
params = pickle.loads(exists(args, 'params'))
vdi_uuids = params["vdi_uuids"]
image_name = params["image_name"]
glance_host = params["glance_host"]
glance_port = params["glance_port"]
sr_path = get_sr_path(session)
#FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs
tmp_file = "%s.tar.gz" % os.path.join('/tmp', image_name)
tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path]
paths = [ "%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids ]
tar_cmd.extend(paths)
logging.debug("Bundling image with cmd: %s", tar_cmd)
subprocess.call(tar_cmd)
logging.debug("Writing to test file %s", tmp_file)
put_bundle_in_glance(tmp_file, image_name, glance_host, glance_port)
return "" # FIXME(sirp): return anything useful here?
def put_bundle_in_glance(tmp_file, image_name, glance_host, glance_port):
size = os.path.getsize(tmp_file)
basename = os.path.basename(tmp_file)
bundle = open(tmp_file, 'r')
try:
headers = {
'x-image-meta-store': 'file',
'x-image-meta-is_public': 'True',
'x-image-meta-type': 'raw',
'x-image-meta-name': image_name,
'x-image-meta-size': size,
'content-length': size,
'content-type': 'application/octet-stream',
}
conn = httplib.HTTPConnection(glance_host, glance_port)
#NOTE(sirp): httplib under python2.4 won't accept a file-like object
# to request
conn.putrequest('POST', '/images')
for header, value in headers.iteritems():
conn.putheader(header, value)
conn.endheaders()
chunk = bundle.read(CHUNK_SIZE)
while chunk:
conn.send(chunk)
chunk = bundle.read(CHUNK_SIZE)
res = conn.getresponse()
#FIXME(sirp): should this be 201 Created?
if res.status != httplib.OK:
raise Exception("Unexpected response from Glance %i" % res.status)
finally:
bundle.close()
def get_sr_path(session):
sr_ref = find_sr(session)
if sr_ref is None:
raise Exception('Cannot find SR to read VDI from')
sr_rec = session.xenapi.SR.get_record(sr_ref)
sr_uuid = sr_rec["uuid"]
sr_path = os.path.join(FILE_SR_PATH, sr_uuid)
return sr_path
#TODO(sirp): both objectstore and glance need this, should this be refactored
#into common lib
def find_sr(session):
host = get_this_host(session)
srs = session.xenapi.SR.get_all()
for sr in srs:
sr_rec = session.xenapi.SR.get_record(sr)
if not ('i18n-key' in sr_rec['other_config'] and
sr_rec['other_config']['i18n-key'] == 'local-storage'):
continue
for pbd in sr_rec['PBDs']:
pbd_rec = session.xenapi.PBD.get_record(pbd)
if pbd_rec['host'] == host:
return sr
return None
if __name__ == '__main__':
XenAPIPlugin.dispatch({'put_vdis': put_vdis})

68
run_tests.py Normal file
View File

@@ -0,0 +1,68 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import sys
from nose import config
from nose import result
from nose import core
class NovaTestResult(result.TextTestResult):
def __init__(self, *args, **kw):
result.TextTestResult.__init__(self, *args, **kw)
self._last_case = None
def getDescription(self, test):
return str(test)
def startTest(self, test):
unittest.TestResult.startTest(self, test)
current_case = test.test.__class__.__name__
if self.showAll:
if current_case != self._last_case:
self.stream.writeln(current_case)
self._last_case = current_case
self.stream.write(
' %s' % str(test.test._testMethodName).ljust(60))
self.stream.flush()
class NovaTestRunner(core.TextTestRunner):
def _makeResult(self):
return NovaTestResult(self.stream,
self.descriptions,
self.verbosity,
self.config)
if __name__ == '__main__':
c = config.Config(stream=sys.stdout,
env=os.environ,
verbosity=3)
runner = NovaTestRunner(stream=c.stream,
verbosity=c.verbosity,
config=c)
sys.exit(not core.run(config=c, testRunner=runner))

View File

@@ -32,16 +32,17 @@ never_venv=0
force=0
noseargs=
for arg in "$@"; do
process_option $arg
done
NOSETESTS="nosetests -v $noseargs"
NOSETESTS="python run_tests.py $noseargs"
if [ $never_venv -eq 1 ]; then
# Just run the test suites in current environment
rm -f nova.sqlite
$NOSETESTS
$NOSETESTS 2> run_tests.err.log
exit
fi
@@ -53,7 +54,7 @@ fi
if [ -e ${venv} ]; then
${with_venv} rm -f nova.sqlite
${with_venv} $NOSETESTS
${with_venv} $NOSETESTS 2> run_tests.err.log
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
@@ -66,10 +67,10 @@ else
python tools/install_venv.py
else
rm -f nova.sqlite
$NOSETESTS
$NOSETESTS 2> run_tests.err.log
exit
fi
fi
${with_venv} rm -f nova.sqlite
${with_venv} $NOSETESTS
${with_venv} $NOSETESTS 2> run_tests.err.log
fi

View File

@@ -23,3 +23,5 @@ greenlet==0.3.1
nose
bzr
Twisted>=10.1.0
PasteDeploy
paste