Merged trunk.
@@ -5,13 +5,7 @@ _trial_temp
keys
networks
nova.sqlite
CA/cacert.pem
CA/crl.pem
CA/index.txt*
CA/openssl.cnf
CA/serial*
CA/newcerts/*.pem
CA/private/cakey.pem
CA
nova/vcsversion.py
*.DS_Store
.project
.mailmap: 1 line changed
@@ -4,6 +4,7 @@
<anotherjesse@gmail.com> <jesse@dancelamb>
<anotherjesse@gmail.com> <jesse@gigantor.local>
<anotherjesse@gmail.com> <jesse@ubuntu>
<anotherjesse@gmail.com> <jesse@aire.local>
<ant@openstack.org> <amesserl@rackspace.com>
<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
<brian.lamar@rackspace.com> <brian.lamar@gmail.com>
Authors: 2 lines changed
@@ -31,6 +31,7 @@ Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
Joe Heck <heckj@mac.com>
Joel Moore <joelbm24@gmail.com>
Johannes Erdfelt <johannes.erdfelt@rackspace.com>
John Dewey <john@dewey.ws>
John Tran <jtran@attinteractive.com>
Jonathan Bryce <jbryce@jbryce.com>
@@ -73,5 +74,6 @@ Trey Morris <trey.morris@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
bin/nova-manage: 141 lines changed
@@ -570,6 +570,49 @@ class NetworkCommands(object):
class VmCommands(object):
    """Class for managing VM instances."""

    def list(self, host=None):
        """Show a list of all instances

        :param host: show all instances on the specified host.
        :param instance: show the specified instance.
        """
        print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
              " %-10s %-10s %-10s %-5s" % (
                    _('instance'),
                    _('node'),
                    _('type'),
                    _('state'),
                    _('launched'),
                    _('image'),
                    _('kernel'),
                    _('ramdisk'),
                    _('project'),
                    _('user'),
                    _('zone'),
                    _('index'))

        if host == None:
            instances = db.instance_get_all(context.get_admin_context())
        else:
            instances = db.instance_get_all_by_host(
                            context.get_admin_context(), host)

        for instance in instances:
            print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
                  " %-10s %-10s %-10s %-5d" % (
                        instance['hostname'],
                        instance['host'],
                        instance['instance_type'],
                        instance['state_description'],
                        instance['launched_at'],
                        instance['image_id'],
                        instance['kernel_id'],
                        instance['ramdisk_id'],
                        instance['project_id'],
                        instance['user_id'],
                        instance['availability_zone'],
                        instance['launch_index'])

    def live_migration(self, ec2_id, dest):
        """Migrates a running instance to a new machine.
@@ -701,15 +744,6 @@ class ServiceCommands(object):
                 {"method": "update_available_resource"})


class LogCommands(object):
    def request(self, request_id, logfile='/var/log/nova.log'):
        """Show all fields in the log for the given request.  Assumes you
        haven't changed the log format too much.
        ARGS: request_id [logfile]"""
        lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id))
        print re.sub('#012', "\n", "\n".join(lines))


class DbCommands(object):
    """Class for managing the database."""
@@ -725,49 +759,6 @@ class DbCommands(object):
        print migration.db_version()


class InstanceCommands(object):
    """Class for managing instances."""

    def list(self, host=None, instance=None):
        """Show a list of all instances"""
        print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
              " %-10s %-10s %-10s %-5s" % (
                    _('instance'),
                    _('node'),
                    _('type'),
                    _('state'),
                    _('launched'),
                    _('image'),
                    _('kernel'),
                    _('ramdisk'),
                    _('project'),
                    _('user'),
                    _('zone'),
                    _('index'))

        if host == None:
            instances = db.instance_get_all(context.get_admin_context())
        else:
            instances = db.instance_get_all_by_host(
                            context.get_admin_context(), host)

        for instance in instances:
            print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
                  " %-10s %-10s %-10s %-5d" % (
                        instance['hostname'],
                        instance['host'],
                        instance['instance_type'],
                        instance['state_description'],
                        instance['launched_at'],
                        instance['image_id'],
                        instance['kernel_id'],
                        instance['ramdisk_id'],
                        instance['project_id'],
                        instance['user_id'],
                        instance['availability_zone'],
                        instance['launch_index'])


class VolumeCommands(object):
    """Methods for dealing with a cloud in an odd state"""
@@ -878,7 +869,7 @@ class InstanceTypeCommands(object):
            elif name == "--all":
                inst_types = instance_types.get_all_types(True)
            else:
                inst_types = instance_types.get_instance_type(name)
                inst_types = instance_types.get_instance_type_by_name(name)
        except exception.DBError, e:
            _db_error(e)
        if isinstance(inst_types.values()[0], dict):
@@ -894,20 +885,17 @@ class ImageCommands(object):
    def __init__(self, *args, **kwargs):
        self.image_service = utils.import_object(FLAGS.image_service)

    def _register(self, image_type, disk_format, container_format,
    def _register(self, container_format, disk_format,
                  path, owner, name=None, is_public='T',
                  architecture='x86_64', kernel_id=None, ramdisk_id=None):
        meta = {'is_public': True,
        meta = {'is_public': (is_public == 'T'),
                'name': name,
                'disk_format': disk_format,
                'container_format': container_format,
                'disk_format': disk_format,
                'properties': {'image_state': 'available',
                               'owner_id': owner,
                               'type': image_type,
                               'project_id': owner,
                               'architecture': architecture,
                               'image_location': 'local',
                               'is_public': (is_public == 'T')}}
        print image_type, meta
                               'image_location': 'local'}}
        if kernel_id:
            meta['properties']['kernel_id'] = int(kernel_id)
        if ramdisk_id:
@@ -932,16 +920,18 @@ class ImageCommands(object):
            ramdisk_id = self.ramdisk_register(ramdisk, owner, None,
                                               is_public, architecture)
        self.image_register(image, owner, name, is_public,
                            architecture, kernel_id, ramdisk_id)
                            architecture, 'ami', 'ami',
                            kernel_id, ramdisk_id)

    def image_register(self, path, owner, name=None, is_public='T',
                       architecture='x86_64', kernel_id=None, ramdisk_id=None,
                       disk_format='ami', container_format='ami'):
                       architecture='x86_64', container_format='bare',
                       disk_format='raw', kernel_id=None, ramdisk_id=None):
        """Uploads an image into the image_service
        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
                   [container_format='bare'] [disk_format='raw']
                   [kernel_id=None] [ramdisk_id=None]
                   [disk_format='ami'] [container_format='ami']"""
        return self._register('machine', disk_format, container_format, path,
        """
        return self._register(container_format, disk_format, path,
                              owner, name, is_public, architecture,
                              kernel_id, ramdisk_id)
@@ -950,7 +940,7 @@ class ImageCommands(object):
        """Uploads a kernel into the image_service
        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
        """
        return self._register('kernel', 'aki', 'aki', path, owner, name,
        return self._register('aki', 'aki', path, owner, name,
                              is_public, architecture)

    def ramdisk_register(self, path, owner, name=None, is_public='T',
@@ -958,7 +948,7 @@ class ImageCommands(object):
        """Uploads a ramdisk into the image_service
        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
        """
        return self._register('ramdisk', 'ari', 'ari', path, owner, name,
        return self._register('ari', 'ari', path, owner, name,
                              is_public, architecture)

    def _lookup(self, old_image_id):
@@ -975,16 +965,17 @@ class ImageCommands(object):
                   'ramdisk': 'ari'}
        container_format = mapping[old['type']]
        disk_format = container_format
        if container_format == 'ami' and not old.get('kernelId'):
            container_format = 'bare'
            disk_format = 'raw'
        new = {'disk_format': disk_format,
               'container_format': container_format,
               'is_public': True,
               'is_public': old['isPublic'],
               'name': old['imageId'],
               'properties': {'image_state': old['imageState'],
                              'owner_id': old['imageOwnerId'],
                              'project_id': old['imageOwnerId'],
                              'architecture': old['architecture'],
                              'type': old['type'],
                              'image_location': old['imageLocation'],
                              'is_public': old['isPublic']}}
                              'image_location': old['imageLocation']}}
        if old.get('kernelId'):
            new['properties']['kernel_id'] = self._lookup(old['kernelId'])
        if old.get('ramdiskId'):
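Editor's aside, not part of the diff: the conversion hunk above maps the legacy image `type` property onto container/disk formats, falling back to bare/raw for machine images without a kernel. A minimal standalone sketch of that mapping, assuming the truncated entries of `mapping` follow the aki/ari pattern shown (the helper name is hypothetical):

    def convert_image_format(old_type, has_kernel):
        """Map a legacy image 'type' to (container_format, disk_format)."""
        # 'machine'/'kernel' entries are assumed; only 'ramdisk': 'ari' is visible above.
        mapping = {'machine': 'ami', 'kernel': 'aki', 'ramdisk': 'ari'}
        container_format = mapping[old_type]
        disk_format = container_format
        # A machine image with no associated kernel is registered as bare/raw.
        if container_format == 'ami' and not has_kernel:
            container_format = 'bare'
            disk_format = 'raw'
        return container_format, disk_format

    print convert_image_format('machine', has_kernel=False)  # ('bare', 'raw')
    print convert_image_format('kernel', has_kernel=True)    # ('aki', 'aki')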
@@ -1049,13 +1040,11 @@ CATEGORIES = [
    ('network', NetworkCommands),
    ('vm', VmCommands),
    ('service', ServiceCommands),
    ('log', LogCommands),
    ('db', DbCommands),
    ('volume', VolumeCommands),
    ('instance_type', InstanceTypeCommands),
    ('image', ImageCommands),
    ('flavor', InstanceTypeCommands),
    ('instance', InstanceCommands)]
    ('flavor', InstanceTypeCommands)]


def lazy_match(name, key_value_tuples):
@@ -38,6 +38,46 @@ The cloudpipe image is basically just a linux instance with openvpn installed.
It is also useful to have a cron script that will periodically redownload the metadata and copy the new crl. This will keep revoked users from connecting and will disconnect any users that are connected with revoked certificates when their connection is renegotiated (every hour).


Creating a Cloudpipe Image
--------------------------

Making a cloudpipe image is relatively easy.

# install openvpn on a base ubuntu image.
# set up a server.conf.template in /etc/openvpn/

.. literalinclude:: server.conf.template
   :language: bash
   :linenos:

# set up.sh in /etc/openvpn/

.. literalinclude:: up.sh
   :language: bash
   :linenos:

# set down.sh in /etc/openvpn/

.. literalinclude:: down.sh
   :language: bash
   :linenos:

# download and run the payload on boot from /etc/rc.local.

.. literalinclude:: rc.local
   :language: bash
   :linenos:

# register the image and set the image id in your flagfile::

  --vpn_image_id=ami-xxxxxxxx

# you should set a few other flags to make vpns work properly::

  --use_project_ca
  --cnt_vpn_clients=5


Cloudpipe Launch
----------------
@@ -63,6 +103,31 @@ Certificates and Revocation

If the use_project_ca flag is set (required for cloudpipes to work securely), then each project has its own ca. This ca is used to sign the certificate for the vpn, and is also passed to the user for bundling images. When a certificate is revoked using nova-manage, a new Certificate Revocation List (crl) is generated. As long as cloudpipe has an updated crl, it will block revoked users from connecting to the vpn.

The userdata for cloudpipe isn't currently updated when certs are revoked, so it is necessary to restart the cloudpipe instance if a user's credentials are revoked.


Restarting Cloudpipe VPN
------------------------

You can reboot a cloudpipe vpn through the api if something goes wrong (using euca-reboot-instances for example), but if you generate a new crl, you will have to terminate it and start it again using nova-manage vpn run. The cloudpipe instance always gets the first ip in the subnet and it can take up to 10 minutes for the ip to be recovered. If you try to start the new vpn instance too soon, the instance will fail to start because of a NoMoreAddresses error. If you can't wait 10 minutes, you can manually update the ip with something like the following (use the right ip for the project)::

  euca-terminate-instances <instance_id>
  mysql nova -e "update fixed_ips set allocated=0, leased=0, instance_id=NULL where fixed_ip='10.0.0.2'"

You also will need to terminate the dnsmasq running for the user (make sure you use the right pid file)::

  sudo kill `cat /var/lib/nova/br100.pid`

Now you should be able to re-run the vpn::

  nova-manage vpn run <project_id>


Logging into Cloudpipe VPN
--------------------------

The keypair that was used to launch the cloudpipe instance should be in the keys/<project_id> folder. You can use this key to log into the cloudpipe instance for debugging purposes.


The :mod:`nova.cloudpipe.pipelib` Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
doc/source/devref/rc.local: 36 lines (new file)
@@ -0,0 +1,36 @@
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.
####### These lines go at the end of /etc/rc.local #######
. /lib/lsb/init-functions

echo Downloading payload from userdata
wget http://169.254.169.254/latest/user-data -O /tmp/payload.b64
echo Decrypting base64 payload
openssl enc -d -base64 -in /tmp/payload.b64 -out /tmp/payload.zip

mkdir -p /tmp/payload
echo Unzipping payload file
unzip -o /tmp/payload.zip -d /tmp/payload/

# if the autorun.sh script exists, run it
if [ -e /tmp/payload/autorun.sh ]; then
    echo Running autorun.sh
    cd /tmp/payload
    sh /tmp/payload/autorun.sh
else
    echo rc.local : No autorun script to run
fi

exit 0
doc/source/devref/server.conf.template: 34 lines (new file)
@@ -0,0 +1,34 @@
port 1194
proto udp
dev tap0
up "/etc/openvpn/up.sh br0"
down "/etc/openvpn/down.sh br0"

persist-key
persist-tun

ca ca.crt
cert server.crt
key server.key  # This file should be kept secret

dh dh1024.pem
ifconfig-pool-persist ipp.txt

server-bridge VPN_IP DHCP_SUBNET DHCP_LOWER DHCP_UPPER

client-to-client
keepalive 10 120
comp-lzo

max-clients 1

user nobody
group nogroup

persist-key
persist-tun

status openvpn-status.log

verb 3
mute 20
doc/source/devref/zone.rst: 127 lines (new file)
@@ -0,0 +1,127 @@
..
      Copyright 2010-2011 OpenStack LLC
      All Rights Reserved.

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
      License for the specific language governing permissions and limitations
      under the License.

Zones
=====

A Nova deployment is called a Zone. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further, a Zone may contain many API nodes; many Scheduler, Volume, Network and Compute nodes; as well as a cluster of databases and RabbitMQ servers. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution.

The idea behind Zones is that, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion.

Zones only know about their immediate children; they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones.

Zones share nothing. They communicate via the public OpenStack API only. No database, queue, user or project definition is shared between Zones.


Capabilities
------------
Routing between Zones is based on the Capabilities of that Zone. Capabilities are nothing more than key/value pairs. Values are multi-valued, with each value separated by a semicolon (`;`). When expressed as a string they take the form:

::

  key=value;value;value, key=value;value;value

Zones have Capabilities which are general to the Zone and are set via the `--zone-capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag.
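Editor's note, illustrative only and not part of the committed file: the capability string format shown above can be turned back into a dictionary with a few lines of Python; the `parse_capabilities` helper below is hypothetical::

  def parse_capabilities(cap_string):
      """Parse 'key=value;value, key=value;value' into {key: [values]}."""
      caps = {}
      for pair in cap_string.split(','):
          key, _sep, values = pair.strip().partition('=')
          caps[key] = values.split(';')
      return caps

  print parse_capabilities('hypervisor=xenserver;kvm, os=linux;windows')
  # {'hypervisor': ['xenserver', 'kvm'], 'os': ['linux', 'windows']}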
Flow within a Zone
------------------
The brunt of the work within a Zone is done in the Scheduler Service. The Scheduler is responsible for:

- collecting capability messages from the Compute, Volume and Network nodes,
- polling the child Zones for their status and
- providing data to the Distributed Scheduler for performing load balancing calculations.

Inter-service communication within a Zone is done with RabbitMQ. Each class of Service (Compute, Volume and Network) has both a named message exchange (particular to that host) and a general message exchange (particular to that class of service). Messages sent to these exchanges are picked off in round-robin fashion. Zones introduce a new fan-out exchange per service. Messages sent to the fan-out exchange are picked up by all services of a particular class. This fan-out exchange is used by the Scheduler services to receive capability messages from the Compute, Volume and Network nodes.

These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The `SchedulerManager` object has a reference to the `ZoneManager` it can use for load balancing.

The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone-name` flag (and defaults to "nova").

Zone administrative functions
-----------------------------
Zone administrative operations are usually done using python-novaclient_.

.. _python-novaclient: https://github.com/rackspace/python-novaclient

In order to use the Zone operations, be sure to enable administrator operations in the OpenStack API by setting the `--allow_admin_api=true` flag.

Finally you need to enable Zone Forwarding. This will be used by the Distributed Scheduler initiative currently underway. Set `--enable_zone_routing=true` to enable this feature.

Find out about this Zone
------------------------
In any Zone you can find the Zone's name and capabilities with the ``nova zone-info`` command.

::

  alice@novadev:~$ nova zone-info
  +-----------------+---------------+
  |     Property    |     Value     |
  +-----------------+---------------+
  | compute_cpu     | 0.7,0.7       |
  | compute_disk    | 123000,123000 |
  | compute_network | 800,800       |
  | hypervisor      | xenserver     |
  | name            | nova          |
  | network_cpu     | 0.7,0.7       |
  | network_disk    | 123000,123000 |
  | network_network | 800,800       |
  | os              | linux         |
  +-----------------+---------------+

This equates to a GET operation on `.../zones/info`. If you have no child Zones defined you'll usually only get back the default `name`, `hypervisor` and `os` capabilities. Otherwise you'll get back a tuple of min, max values for each capability of all the hosts of all the services running in the child zone. These take the `<service>_<capability> = <min>,<max>` format.
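Editor's note, illustrative only and not part of the committed file: the `<service>_<capability> = <min>,<max>` aggregation described above can be sketched in a few lines; the `aggregate_capabilities` helper and its sample input are hypothetical::

  def aggregate_capabilities(host_caps):
      """Collapse (service, capability, value) reports into 'min,max' strings."""
      summary = {}
      for service, capability, value in host_caps:
          key = '%s_%s' % (service, capability)
          lo, hi = summary.get(key, (value, value))
          summary[key] = (min(lo, value), max(hi, value))
      return dict((k, '%s,%s' % v) for k, v in summary.items())

  # Two compute hosts reporting cpu load: {'compute_cpu': '0.7,0.9'}
  print aggregate_capabilities([('compute', 'cpu', 0.7),
                                ('compute', 'cpu', 0.9)])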
Adding a child Zone
-------------------
Any Zone can be a parent Zone. Children are associated with a Zone. The Zone from which this command originates is known as the Parent Zone. Routing is only ever conducted from a Zone to its children, never the other direction. From a parent zone you can add a child zone with the following command:

::

  nova zone-add <child zone api url> <username> <nova api key>

You can get the `child zone api url`, `nova api key` and `username` from the `novarc` file in the child zone. For example:

::

  export NOVA_API_KEY="3bd1af06-6435-4e23-a827-413b2eb86934"
  export NOVA_USERNAME="alice"
  export NOVA_URL="http://192.168.2.120:8774/v1.0/"


This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is made when this command is run; it only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information.

Getting a list of child Zones
-----------------------------

::

  nova zone-list

  alice@novadev:~$ nova zone-list
  +----+-------+-----------+--------------------------------------------+---------------------------------+
  | ID |  Name | Is Active |                Capabilities                |             API URL             |
  +----+-------+-----------+--------------------------------------------+---------------------------------+
  | 2  | zone1 | True      | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.108:8774/v1.0/ |
  | 3  | zone2 | True      | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.115:8774/v1.0/ |
  +----+-------+-----------+--------------------------------------------+---------------------------------+

This equates to a GET operation to `.../zones`.

Removing a child Zone
---------------------
::

  nova zone-delete <N>

This equates to a DELETE call to `.../zones/N`. The Zone with ID=N will be removed. This will only remove the zone entry from the current (parent) Zone; no child Zones are affected. Removing a Child Zone doesn't affect any other part of the hierarchy.
doc/source/down.sh: 7 lines (new file)
@@ -0,0 +1,7 @@
#!/bin/sh

BR=$1
DEV=$2

/usr/sbin/brctl delif $BR $DEV
/sbin/ifconfig $DEV down
@@ -240,6 +240,16 @@ Nova Images

    Converts all images in directory from the old (Bexar) format to the new format.

Nova VM
~~~~~~~~~~~

``nova-manage vm list [host]``
    Show a list of all instances. Accepts an optional hostname (to show only instances on a specific host).

``nova-manage live-migration <ec2_id> <destination host name>``
    Live migrate an instance from the current host to the destination host. Requires the instance id (which comes from euca-describe-instance) and the destination host name (which can be found from nova-manage service list).


FILES
========
doc/source/up.sh: 7 lines (new file)
@@ -0,0 +1,7 @@
#!/bin/sh

BR=$1
DEV=$2
MTU=$3
/sbin/ifconfig $DEV mtu $MTU promisc up
/usr/sbin/brctl addif $BR $DEV
@@ -36,6 +36,7 @@ from nova import rpc
from nova import service
from nova import test
from nova import utils
from nova import exception
from nova.auth import manager
from nova.compute import power_state
from nova.api.ec2 import cloud
@@ -247,6 +248,37 @@ class CloudTestCase(test.TestCase):
        self.assertRaises(NotFound, describe_images,
                          self.context, ['ami-fake'])

    def test_describe_image_attribute(self):
        describe_image_attribute = self.cloud.describe_image_attribute

        def fake_show(meh, context, id):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
                    'type': 'machine'}, 'is_public': True}

        self.stubs.Set(local.LocalImageService, 'show', fake_show)
        self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
        result = describe_image_attribute(self.context, 'ami-00000001',
                                          'launchPermission')
        self.assertEqual([{'group': 'all'}], result['launchPermission'])

    def test_modify_image_attribute(self):
        modify_image_attribute = self.cloud.modify_image_attribute

        def fake_show(meh, context, id):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
                    'type': 'machine'}, 'is_public': False}

        def fake_update(meh, context, image_id, metadata, data=None):
            return metadata

        self.stubs.Set(local.LocalImageService, 'show', fake_show)
        self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
        self.stubs.Set(local.LocalImageService, 'update', fake_update)
        result = modify_image_attribute(self.context, 'ami-00000001',
                                        'launchPermission', 'add',
                                        user_group=['all'])
        self.assertEqual(True, result['is_public'])

    def test_console_output(self):
        instance_type = FLAGS.default_instance_type
        max_count = 1
@@ -341,6 +373,19 @@ class CloudTestCase(test.TestCase):
        LOG.debug(_("Terminating instance %s"), instance_id)
        rv = self.compute.terminate_instance(instance_id)

    def test_terminate_instances(self):
        inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                                  'image_id': 1,
                                                  'host': 'host1'})
        terminate_instances = self.cloud.terminate_instances
        # valid instance_id
        result = terminate_instances(self.context, ['i-00000001'])
        self.assertTrue(result)
        # non-existing instance_id
        self.assertRaises(exception.InstanceNotFound, terminate_instances,
                          self.context, ['i-2'])
        db.instance_destroy(self.context, inst1['id'])

    def test_update_of_instance_display_fields(self):
        inst = db.instance_create(self.context, {})
        ec2_id = ec2utils.id_to_ec2_id(inst['id'])
@@ -84,7 +84,8 @@ class ComputeTestCase(test.TestCase):
        inst['launch_time'] = '10'
        inst['user_id'] = self.user.id
        inst['project_id'] = self.project.id
        inst['instance_type'] = 'm1.tiny'
        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
        inst['instance_type_id'] = type_id
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        inst.update(params)
@@ -132,7 +133,7 @@ class ComputeTestCase(test.TestCase):
        cases = [dict(), dict(display_name=None)]
        for instance in cases:
            ref = self.compute_api.create(self.context,
                FLAGS.default_instance_type, None, **instance)
                instance_types.get_default_instance_type(), None, **instance)
            try:
                self.assertNotEqual(ref[0]['display_name'], None)
            finally:
@@ -143,7 +144,7 @@ class ComputeTestCase(test.TestCase):
        group = self._create_group()
        ref = self.compute_api.create(
            self.context,
            instance_type=FLAGS.default_instance_type,
            instance_type=instance_types.get_default_instance_type(),
            image_id=None,
            security_group=['testgroup'])
        try:
@@ -161,7 +162,7 @@ class ComputeTestCase(test.TestCase):

        ref = self.compute_api.create(
            self.context,
            instance_type=FLAGS.default_instance_type,
            instance_type=instance_types.get_default_instance_type(),
            image_id=None,
            security_group=['testgroup'])
        try:
@@ -177,7 +178,7 @@ class ComputeTestCase(test.TestCase):

        ref = self.compute_api.create(
            self.context,
            instance_type=FLAGS.default_instance_type,
            instance_type=instance_types.get_default_instance_type(),
            image_id=None,
            security_group=['testgroup'])

@@ -359,8 +360,9 @@ class ComputeTestCase(test.TestCase):
        instance_id = self._create_instance()

        self.compute.run_instance(self.context, instance_id)
        inst_type = instance_types.get_instance_type_by_name('m1.xlarge')
        db.instance_update(self.context, instance_id,
                           {'instance_type': 'm1.xlarge'})
                           {'instance_type_id': inst_type['id']})

        self.assertRaises(exception.ApiError, self.compute_api.resize,
                          context, instance_id, 1)
@@ -380,8 +382,8 @@ class ComputeTestCase(test.TestCase):
        self.compute.terminate_instance(context, instance_id)

    def test_get_by_flavor_id(self):
        type = instance_types.get_by_flavor_id(1)
        self.assertEqual(type, 'm1.tiny')
        type = instance_types.get_instance_type_by_flavor_id(1)
        self.assertEqual(type['name'], 'm1.tiny')

    def test_resize_same_source_fails(self):
        """Ensure instance fails to migrate when source and destination are
@@ -664,4 +666,5 @@ class ComputeTestCase(test.TestCase):

        instances = db.instance_get_all(context.get_admin_context())
        LOG.info(_("After force-killing instances: %s"), instances)
        self.assertEqual(len(instances), 0)
        self.assertEqual(len(instances), 1)
        self.assertEqual(power_state.SHUTOFF, instances[0]['state'])
@@ -62,7 +62,7 @@ class ConsoleTestCase(test.TestCase):
        inst['launch_time'] = '10'
        inst['user_id'] = self.user.id
        inst['project_id'] = self.project.id
        inst['instance_type'] = 'm1.tiny'
        inst['instance_type_id'] = 1
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        return db.instance_create(self.context, inst)['id']
@@ -40,7 +40,11 @@ class InstanceTypeTestCase(test.TestCase):
        max_flavorid = session.query(models.InstanceTypes).\
                               order_by("flavorid desc").\
                               first()
        max_id = session.query(models.InstanceTypes).\
                         order_by("id desc").\
                         first()
        self.flavorid = max_flavorid["flavorid"] + 1
        self.id = max_id["id"] + 1
        self.name = str(int(time.time()))

    def test_instance_type_create_then_delete(self):
@@ -53,7 +57,7 @@ class InstanceTypeTestCase(test.TestCase):
                        'instance type was not created')
        instance_types.destroy(self.name)
        self.assertEqual(1,
                         instance_types.get_instance_type(self.name)["deleted"])
                         instance_types.get_instance_type(self.id)["deleted"])
        self.assertEqual(starting_inst_list, instance_types.get_all_types())
        instance_types.purge(self.name)
        self.assertEqual(len(starting_inst_list),
@@ -263,7 +263,7 @@ class SimpleDriverTestCase(test.TestCase):
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user.id
        inst['project_id'] = self.project.id
        inst['instance_type'] = 'm1.tiny'
        inst['instance_type_id'] = '1'
        inst['mac_address'] = utils.generate_mac()
        inst['vcpus'] = kwargs.get('vcpus', 1)
        inst['ami_launch_index'] = 0
@@ -140,7 +140,7 @@ class LibvirtConnTestCase(test.TestCase):
                    'vcpus': 2,
                    'project_id': 'fake',
                    'bridge': 'br101',
                    'instance_type': 'm1.small'}
                    'instance_type_id': '5'}  # m1.small

    def lazy_load_library_exists(self):
        """check if libvirt is available."""
@@ -479,7 +479,7 @@ class LibvirtConnTestCase(test.TestCase):

        fake_timer = FakeTime()

        self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise)
        self.create_fake_libvirt_mock()
        instance_ref = db.instance_create(self.context, self.test_instance)

        # Start test
@@ -488,6 +488,7 @@ class LibvirtConnTestCase(test.TestCase):
            conn = libvirt_conn.LibvirtConnection(False)
            conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
            conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
            conn.firewall_driver.setattr('instance_filter_exists', fake_none)
            conn.ensure_filtering_rules_for_instance(instance_ref,
                                                     time=fake_timer)
        except exception.Error, e:
@@ -106,7 +106,7 @@ class VolumeTestCase(test.TestCase):
        inst['launch_time'] = '10'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type'] = 'm1.tiny'
        inst['instance_type_id'] = '2'  # m1.tiny
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        instance_id = db.instance_create(self.context, inst)['id']
@@ -80,7 +80,7 @@ class XenAPIVolumeTestCase(test.TestCase):
                  'image_id': 1,
                  'kernel_id': 2,
                  'ramdisk_id': 3,
                  'instance_type': 'm1.large',
                  'instance_type_id': '3',  # m1.large
                  'mac_address': 'aa:bb:cc:dd:ee:ff',
                  'os_type': 'linux'}

@@ -289,11 +289,11 @@ class XenAPIVMTestCase(test.TestCase):
                      'enabled':'1'}],
            'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff',
                      'netmask': '120',
                      'enabled': '1',
                      'gateway': 'fe80::a00:1'}],
                      'enabled': '1'}],
            'mac': 'aa:bb:cc:dd:ee:ff',
            'dns': ['10.0.0.2'],
            'gateway': '10.0.0.1'})
            'gateway': '10.0.0.1',
            'gateway6': 'fe80::a00:1'})

    def check_vm_params_for_windows(self):
        self.assertEquals(self.vm['platform']['nx'], 'true')
@@ -328,7 +328,7 @@ class XenAPIVMTestCase(test.TestCase):
        self.assertEquals(self.vm['HVM_boot_policy'], '')

    def _test_spawn(self, image_id, kernel_id, ramdisk_id,
                    instance_type="m1.large", os_type="linux",
                    instance_type_id="3", os_type="linux",
                    instance_id=1, check_injection=False):
        stubs.stubout_loopingcall_start(self.stubs)
        values = {'id': instance_id,
@@ -337,7 +337,7 @@ class XenAPIVMTestCase(test.TestCase):
                  'image_id': image_id,
                  'kernel_id': kernel_id,
                  'ramdisk_id': ramdisk_id,
                  'instance_type': instance_type,
                  'instance_type_id': instance_type_id,
                  'mac_address': 'aa:bb:cc:dd:ee:ff',
                  'os_type': os_type}
        instance = db.instance_create(self.context, values)
@@ -349,7 +349,7 @@ class XenAPIVMTestCase(test.TestCase):
        FLAGS.xenapi_image_service = 'glance'
        self.assertRaises(Exception,
                          self._test_spawn,
                          1, 2, 3, "m1.xlarge")
                          1, 2, 3, "4")  # m1.xlarge

    def test_spawn_raw_objectstore(self):
        FLAGS.xenapi_image_service = 'objectstore'
@@ -523,7 +523,7 @@ class XenAPIVMTestCase(test.TestCase):
                  'image_id': 1,
                  'kernel_id': 2,
                  'ramdisk_id': 3,
                  'instance_type': 'm1.large',
                  'instance_type_id': '3',  # m1.large
                  'mac_address': 'aa:bb:cc:dd:ee:ff',
                  'os_type': 'linux'}
        instance = db.instance_create(self.context, values)
@@ -580,7 +580,7 @@ class XenAPIMigrateInstance(test.TestCase):
                  'kernel_id': None,
                  'ramdisk_id': None,
                  'local_gb': 5,
                  'instance_type': 'm1.large',
                  'instance_type_id': '3',  # m1.large
                  'mac_address': 'aa:bb:cc:dd:ee:ff',
                  'os_type': 'linux'}
nova/wsgi.py: 46 lines changed
@@ -357,7 +357,8 @@ class Controller(object):

        if type(result) is dict:
            content_type = req.best_match_content_type()
            body = self._serialize(result, content_type)
            default_xmlns = self.get_default_xmlns(req)
            body = self._serialize(result, content_type, default_xmlns)

            response = webob.Response()
            response.headers["Content-Type"] = content_type
@@ -366,18 +367,18 @@ class Controller(object):
            msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
            LOG.debug(msg)
            return response

        else:
            return result

    def _serialize(self, data, content_type):
    def _serialize(self, data, content_type, default_xmlns):
        """
        Serialize the given dict to the provided content_type.
        Uses self._serialization_metadata if it exists, which is a dict mapping
        MIME types to information needed to serialize to that type.
        """
        _metadata = getattr(type(self), "_serialization_metadata", {})
        serializer = Serializer(_metadata)

        serializer = Serializer(_metadata, default_xmlns)
        try:
            return serializer.serialize(data, content_type)
        except exception.InvalidContentType:
@@ -393,19 +394,24 @@ class Controller(object):
        serializer = Serializer(_metadata)
        return serializer.deserialize(data, content_type)

    def get_default_xmlns(self, req):
        """Provide the XML namespace to use if none is otherwise specified."""
        return None


class Serializer(object):
    """
    Serializes and deserializes dictionaries to certain MIME types.
    """

    def __init__(self, metadata=None):
    def __init__(self, metadata=None, default_xmlns=None):
        """
        Create a serializer based on the given WSGI environment.
        'metadata' is an optional dict mapping MIME types to information
        needed to serialize a dictionary to that type.
        """
        self.metadata = metadata or {}
        self.default_xmlns = default_xmlns

    def _get_serialize_handler(self, content_type):
        handlers = {
@@ -483,12 +489,32 @@ class Serializer(object):
        root_key = data.keys()[0]
        doc = minidom.Document()
        node = self._to_xml_node(doc, metadata, root_key, data[root_key])

        xmlns = node.getAttribute('xmlns')
        if not xmlns and self.default_xmlns:
            node.setAttribute('xmlns', self.default_xmlns)

        return node.toprettyxml(indent='    ')

    def _to_xml_node(self, doc, metadata, nodename, data):
        """Recursive method to convert data members to XML nodes."""
        result = doc.createElement(nodename)

        # Set the xml namespace if one is specified
        # TODO(justinsb): We could also use prefixes on the keys
        xmlns = metadata.get('xmlns', None)
        if xmlns:
            result.setAttribute('xmlns', xmlns)

        if type(data) is list:
            collections = metadata.get('list_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for item in data:
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(item))
                    result.appendChild(node)
                return result
            singular = metadata.get('plurals', {}).get(nodename, None)
            if singular is None:
                if nodename.endswith('s'):
@@ -499,6 +525,16 @@ class Serializer(object):
                node = self._to_xml_node(doc, metadata, singular, item)
                result.appendChild(node)
        elif type(data) is dict:
            collections = metadata.get('dict_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for k, v in data.items():
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(k))
                    text = doc.createTextNode(str(v))
                    node.appendChild(text)
                    result.appendChild(node)
                return result
            attrs = metadata.get('attributes', {}).get(nodename, {})
            for k, v in data.items():
                if k in attrs:
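Editor's aside, not part of the diff: the default-xmlns behaviour added to the Serializer above amounts to stamping an `xmlns` attribute on the root node when the metadata did not already set one. A standalone sketch of that idea (not nova's actual class; the namespace URL and helper name are hypothetical):

    from xml.dom import minidom

    def to_xml(data, default_xmlns=None):
        """Render {root: {...}} as XML, applying a default xmlns if unset."""
        root_key = data.keys()[0]
        doc = minidom.Document()
        node = doc.createElement(root_key)
        for key, value in data[root_key].items():
            child = doc.createElement(key)
            child.appendChild(doc.createTextNode(str(value)))
            node.appendChild(child)
        # Only apply the default namespace when none was set explicitly.
        if not node.getAttribute('xmlns') and default_xmlns:
            node.setAttribute('xmlns', default_xmlns)
        return node.toprettyxml(indent='    ')

    print to_xml({'server': {'id': 1, 'name': 'test'}},
                 default_xmlns='http://docs.openstack.org/hypothetical/v1.0')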
po/pt_BR.po: 4691 lines changed (diff suppressed because it is too large)
po/zh_CN.po: 4063 lines changed (diff suppressed because it is too large)