Fixed Authors conflict and re-merged with trunk

Sandy Walsh 2010-12-09 10:22:50 -04:00
commit e6079449dc
64 changed files with 1867 additions and 1239 deletions


@ -1,3 +1,13 @@
run_tests.err.log
.nova-venv
ChangeLog
+ _trial_temp
+ keys
+ networks
+ nova.sqlite
+ CA/cacert.pem
+ CA/index.txt*
+ CA/openssl.cnf
+ CA/serial*
+ CA/newcerts/*.pem
+ CA/private/cakey.pem


@ -20,9 +20,11 @@ Michael Gundlach <michael.gundlach@rackspace.com>
Monty Taylor <mordred@inaugust.com>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
+ Ryan Lucio <rlucio@internap.com>
Sandy Walsh <sandy.walsh@rackspace.com>
Soren Hansen <soren.hansen@rackspace.com>
Todd Willey <todd@ansolabs.com>
+ Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>


@ -34,9 +34,11 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import service
from nova import twistd
+ from nova import utils
if __name__ == '__main__':
+     utils.default_flagfile()
    twistd.serve(__file__)
if __name__ == '__builtin__':
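Several of the bin scripts in this commit gain a call to ``utils.default_flagfile()`` before handing control to twistd. As a rough sketch only (the helper name comes from the diff; the body below is an assumption rather than Nova's verified implementation), such a helper typically just injects a ``--flagfile`` argument into ``sys.argv`` so the flag parser later picks up a default configuration file::

    import os
    import sys


    def default_flagfile(filename='nova.conf'):
        """Hypothetical sketch: point the flag parser at a default flagfile."""
        for arg in sys.argv:
            if arg.startswith('--flagfile'):
                # The operator already chose a flagfile; leave it alone.
                return
        if os.path.exists(filename):
            # Insert after argv[0] so a later FLAGS(sys.argv) reads the file.
            sys.argv.insert(1, '--flagfile=' + filename)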


@ -34,6 +34,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)
+ from nova import utils
from nova import twistd
from nova.compute import monitor
@ -41,6 +42,7 @@ logging.getLogger('boto').setLevel(logging.WARN)
if __name__ == '__main__':
+     utils.default_flagfile()
    twistd.serve(__file__)
if __name__ == '__builtin__':


@ -359,9 +359,14 @@ class ProjectCommands(object):
    def zipfile(self, project_id, user_id, filename='nova.zip'):
        """Exports credentials for project to a zip file
        arguments: project_id user_id [filename='nova.zip]"""
-         zip_file = self.manager.get_credentials(user_id, project_id)
-         with open(filename, 'w') as f:
-             f.write(zip_file)
+         try:
+             zip_file = self.manager.get_credentials(user_id, project_id)
+             with open(filename, 'w') as f:
+                 f.write(zip_file)
+         except db.api.NoMoreNetworks:
+             print ('No more networks available. If this is a new '
+                    'installation, you need\nto call something like this:\n\n'
+                    ' nova-manage network create 10.0.0.0/8 10 64\n\n')

class FloatingIpCommands(object):
@ -467,7 +472,7 @@ def methods_of(obj):
def main():
    """Parse options and call the appropriate class/method."""
-     utils.default_flagfile('/etc/nova/nova-manage.conf')
+     utils.default_flagfile()
    argv = FLAGS(sys.argv)
    if FLAGS.verbose:


@ -34,9 +34,11 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import service
from nova import twistd
+ from nova import utils
if __name__ == '__main__':
+     utils.default_flagfile()
    twistd.serve(__file__)
if __name__ == '__builtin__':


@ -42,8 +42,8 @@ FLAGS = flags.FLAGS
if __name__ == '__main__':
+     utils.default_flagfile()
    twistd.serve(__file__)
if __name__ == '__builtin__':
-     utils.default_flagfile()
    application = handler.get_application()  # pylint: disable-msg=C0103


@ -34,9 +34,11 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import service
from nova import twistd
+ from nova import utils
if __name__ == '__main__':
+     utils.default_flagfile()
    twistd.serve(__file__)
if __name__ == '__builtin__':


@ -34,9 +34,11 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import service
from nova import twistd
+ from nova import utils
if __name__ == '__main__':
+     utils.default_flagfile()
    twistd.serve(__file__)
if __name__ == '__builtin__':


@ -17,11 +17,19 @@ if [ ! -n "$HOST_IP" ]; then
# you should explicitly set HOST_IP in your environment
HOST_IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
fi
- TEST=0
- USE_MYSQL=0
- MYSQL_PASS=nova
- USE_LDAP=0
- LIBVIRT_TYPE=qemu
+ USE_MYSQL=${USE_MYSQL:-0}
+ MYSQL_PASS=${MYSQL_PASS:-nova}
+ TEST=${TEST:-0}
+ USE_LDAP=${USE_LDAP:-0}
+ # Use OpenDJ instead of OpenLDAP when using LDAP
+ USE_OPENDJ=${USE_OPENDJ:-0}
+ LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu}
+ NET_MAN=${NET_MAN:-VlanManager}
+ # NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface
+ #             below but make sure that the interface doesn't already have an
+ #             ip or you risk breaking things.
+ # FLAT_INTERFACE=eth0
if [ "$USE_MYSQL" == 1 ]; then
    SQL_CONN=mysql://root:$MYSQL_PASS@localhost/nova
@ -36,10 +44,11 @@ else
fi
mkdir -p /etc/nova
- cat >/etc/nova/nova-manage.conf << NOVA_CONF_EOF
+ cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF
--verbose
--nodaemon
- --dhcpbridge_flagfile=/etc/nova/nova-manage.conf
+ --dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
+ --network_manager=nova.network.manager.$NET_MAN
--cc_host=$HOST_IP
--routing_source_ip=$HOST_IP
--sql_connection=$SQL_CONN
@ -47,6 +56,10 @@ cat >/etc/nova/nova-manage.conf << NOVA_CONF_EOF
--libvirt_type=$LIBVIRT_TYPE
NOVA_CONF_EOF
+ if [ -n "$FLAT_INTERFACE" ]; then
+     echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf
+ fi
if [ "$CMD" == "branch" ]; then
    sudo apt-get install -y bzr
    rm -rf $NOVA_DIR
@ -61,9 +74,12 @@ if [ "$CMD" == "install" ]; then
    sudo apt-get install -y python-software-properties
    sudo add-apt-repository ppa:nova-core/ppa
    sudo apt-get update
-     sudo apt-get install -y dnsmasq open-iscsi kpartx kvm gawk iptables ebtables
+     sudo apt-get install -y dnsmasq kpartx kvm gawk iptables ebtables
    sudo apt-get install -y user-mode-linux kvm libvirt-bin
-     sudo apt-get install -y screen iscsitarget euca2ools vlan curl rabbitmq-server
+     sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
+     sudo apt-get install -y lvm2 iscsitarget open-iscsi
+     echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
+     sudo /etc/init.d/iscsitarget restart
    sudo modprobe kvm
    sudo /etc/init.d/libvirt-bin restart
    sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot
@ -99,7 +115,13 @@ if [ "$CMD" == "run" ]; then
        rm $NOVA_DIR/nova.sqlite
    fi
    if [ "$USE_LDAP" == 1 ]; then
-         sudo $NOVA_DIR/nova/auth/slap.sh
+         if [ "$USE_OPENDJ" == 1 ]; then
+             echo '--ldap_user_dn=cn=Directory Manager' >> \
+                 /etc/nova/nova-manage.conf
+             sudo $NOVA_DIR/nova/auth/opendj.sh
+         else
+             sudo $NOVA_DIR/nova/auth/slap.sh
+         fi
    fi
    rm -rf $NOVA_DIR/instances
    mkdir -p $NOVA_DIR/instances
@ -122,31 +144,32 @@ if [ "$CMD" == "run" ]; then
    $NOVA_DIR/bin/nova-manage project create admin admin
    # export environment variables for project 'admin' and user 'admin'
    $NOVA_DIR/bin/nova-manage project environment admin admin $NOVA_DIR/novarc
-     # create 3 small networks
-     $NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 3 16
+     # create a small network
+     $NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32
    # nova api crashes if we start it with a regular screen command,
    # so send the start command by forcing text into the window.
-     screen_it api "$NOVA_DIR/bin/nova-api --flagfile=/etc/nova/nova-manage.conf"
-     screen_it objectstore "$NOVA_DIR/bin/nova-objectstore --flagfile=/etc/nova/nova-manage.conf"
-     screen_it compute "$NOVA_DIR/bin/nova-compute --flagfile=/etc/nova/nova-manage.conf"
-     screen_it network "$NOVA_DIR/bin/nova-network --flagfile=/etc/nova/nova-manage.conf"
-     screen_it scheduler "$NOVA_DIR/bin/nova-scheduler --flagfile=/etc/nova/nova-manage.conf"
-     screen_it volume "$NOVA_DIR/bin/nova-volume --flagfile=/etc/nova/nova-manage.conf"
+     screen_it api "$NOVA_DIR/bin/nova-api"
+     screen_it objectstore "$NOVA_DIR/bin/nova-objectstore"
+     screen_it compute "$NOVA_DIR/bin/nova-compute"
+     screen_it network "$NOVA_DIR/bin/nova-network"
+     screen_it scheduler "$NOVA_DIR/bin/nova-scheduler"
+     screen_it volume "$NOVA_DIR/bin/nova-volume"
    screen_it test ". $NOVA_DIR/novarc"
-     screen -x
+     screen -S nova -x
fi
if [ "$CMD" == "run" ] || [ "$CMD" == "terminate" ]; then
    # shutdown instances
    . $NOVA_DIR/novarc; euca-describe-instances | grep i- | cut -f2 | xargs euca-terminate-instances
    sleep 2
+     # delete volumes
+     . $NOVA_DIR/novarc; euca-describe-volumes | grep vol- | cut -f2 | xargs -n1 euca-delete-volume
fi
if [ "$CMD" == "run" ] || [ "$CMD" == "clean" ]; then
    screen -S nova -X quit
    rm *.pid*
+     $NOVA_DIR/tools/setup_iptables.sh clear
fi
if [ "$CMD" == "scrub" ]; then


@ -38,14 +38,14 @@ There are two main tools that a system administrator will find useful to manage
nova.manage
euca2ools
- nova-manage may only be run by users with admin priviledges. euca2ools can be used by all users, though specific commands may be restricted by Role Based Access Control. You can read more about creating and managing users in :doc:`managing.users`
+ The nova-manage command may only be run by users with admin priviledges. Commands for euca2ools can be used by all users, though specific commands may be restricted by Role Based Access Control. You can read more about creating and managing users in :doc:`managing.users`
User and Resource Management
----------------------------
- nova-manage and euca2ools provide the basic interface to perform a broad range of administration functions. In this section, you can read more about how to accomplish specific administration tasks.
+ The nova-manage and euca2ools commands provide the basic interface to perform a broad range of administration functions. In this section, you can read more about how to accomplish specific administration tasks.
- For background on the core objects refenced in this section, see :doc:`../object.model`
+ For background on the core objects referenced in this section, see :doc:`../object.model`
.. toctree::
   :maxdepth: 1


@ -20,21 +20,6 @@ Networking Overview
===================
In Nova, users organize their cloud resources in projects. A Nova project consists of a number of VM instances created by a user. For each VM instance, Nova assigns to it a private IP address. (Currently, Nova only supports Linux bridge networking that allows the virtual interfaces to connect to the outside network through the physical interface. Other virtual network technologies, such as Open vSwitch, could be supported in the future.) The Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network.
- ..
- (perhaps some of this should be moved elsewhere)
- Introduction
- ------------
- Nova consists of seven main components, with the Cloud Controller component representing the global state and interacting with all other components. API Server acts as the Web services front end for the cloud controller. Compute Controller provides compute server resources, and the Object Store component provides storage services. Auth Manager provides authentication and authorization services. Volume Controller provides fast and permanent block-level storage for the comput servers. Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. Scheduler selects the most suitable compute controller to host an instance.
- .. todo:: Insert Figure 1 image from "An OpenStack Network Overview" contributed by Citrix
- Nova is built on a shared-nothing, messaging-based architecture. All of the major components, that is Compute Controller, Volume Controller, Network Controller, and Object Store can be run on multiple servers. Cloud Controller communicates with Object Store via HTTP (Hyper Text Transfer Protocol), but it communicates with Scheduler, Network Controller, and Volume Controller via AMQP (Advanced Message Queue Protocol). To avoid blocking each component while waiting for a response, Nova uses asynchronous calls, with a call-back that gets triggered when a response is received.
- To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
- .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>_`.
Nova Network Strategies
-----------------------


@ -19,7 +19,7 @@ Installing Nova on Multiple Servers
===================================
When you move beyond evaluating the technology and into building an actual
- production environemnt, you will need to know how to configure your datacenter
+ production environment, you will need to know how to configure your datacenter
and how to deploy components across your clusters. This guide should help you
through that process.
@ -161,7 +161,7 @@ Step 3 Setup the sql db
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
SET PASSWORD FOR 'root'@'%' = PASSWORD('nova');
- 7. branch and install Nova
+ 7. Branch and install Nova
::
@ -186,9 +186,7 @@ Step 4 Setup Nova environment
Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.o.
- On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. We ended up doing this manually, (update query fired directly in the DB). Is there a better way to mark a network as bridged?
- Update: This has been resolved w.e.f 27/10. network is marked as bridged automatically based on the type of n/w manager selected.
+ On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected.
More networking details to create a network bridge for flat network
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -233,7 +231,6 @@ unzip them in your home directory, and add them to your environment::
echo ". creds/novarc" >> ~/.bashrc echo ". creds/novarc" >> ~/.bashrc
~/.bashrc ~/.bashrc
Step 6 Restart all relevant services Step 6 Restart all relevant services
------------------------------------ ------------------------------------
@ -249,8 +246,8 @@ Restart relevant nova services::
.. todo:: do we still need the content below?
- Bare-metal Provisioning
- -----------------------
+ Bare-metal Provisioning Notes
+ -----------------------------
To install the base operating system you can use PXE booting.


@ -50,7 +50,7 @@ The following diagram illustrates how the communication that occurs between the
Goals
-----
- * each project is in a protected network segment
+ For our implementation of Nova, our goal is that each project is in a protected network segment. Here are the specifications we keep in mind for meeting this goal.
* RFC-1918 IP space
* public IP via NAT
@ -59,19 +59,19 @@ Goals
* limited (project-admin controllable) access to other project segments
* all connectivity to instance and cloud API is via VPN into the project segment
- * common DMZ segment for support services (only visible from project segment)
+ We also keep as a goal a common DMZ segment for support services, meaning these items are only visible from project segment:
* metadata
* dashboard
Limitations
-----------
+ We kept in mind some of these limitations:
* Projects / cluster limited to available VLANs in switching infrastructure
* Requires VPN for access to project segment
Implementation
--------------
Currently Nova segregates project VLANs using 802.1q VLAN tagging in the


@ -63,8 +63,20 @@ You see an access key and a secret key export, such as these made-up ones:::
export EC2_ACCESS_KEY=4e6498a2-blah-blah-blah-17d1333t97fd
export EC2_SECRET_KEY=0a520304-blah-blah-blah-340sp34k05bbe9a7
- Step 5: Create a project with the user you created
+ Step 5: Create the network
+ --------------------------
+ Type or copy/paste in the following line to create a network prior to creating a project.
+ ::
+     sudo nova-manage network create 10.0.0.0/8 1 64
+ For this command, the IP address is the cidr notation for your netmask, such as 192.168.1.0/24. The value 1 is the total number of networks you want made, and the 64 value is the total number of ips in all networks.
+ After running this command, entries are made in the 'networks' and 'fixed_ips' table in the database.
+ Step 6: Create a project with the user you created
--------------------------------------------------
Type or copy/paste in the following line to create a project named IRT (for Ice Road Truckers, of course) with the newly-created user named anne.
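As a side note on the arithmetic behind ``nova-manage network create 10.0.0.0/8 1 64``: one network of 64 addresses corresponds to a /26 carved out of the 10.0.0.0/8 block. The snippet below only illustrates that relationship (a hypothetical helper, not Nova's own implementation)::

    import ipaddress


    def preview_networks(cidr, num_networks, network_size):
        """Hypothetical helper: list the subnets the arguments describe."""
        # A subnet holding `network_size` addresses needs 32 - log2(size) prefix bits.
        prefix = 32 - (network_size - 1).bit_length()
        subnets = ipaddress.ip_network(cidr).subnets(new_prefix=prefix)
        return [next(subnets) for _ in range(num_networks)]


    # preview_networks('10.0.0.0/8', 1, 64) -> [IPv4Network('10.0.0.0/26')]
    print(preview_networks('10.0.0.0/8', 1, 64))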
@ -94,7 +106,7 @@ Type or copy/paste in the following line to create a project named IRT (for Ice
Data Base Updated
- Step 6: Unzip the nova.zip
+ Step 7: Unzip the nova.zip
--------------------------
You should have a nova.zip file in your current working directory. Unzip it with this command:
@ -116,7 +128,7 @@ You'll see these files extract.
extracting: cacert.pem
- Step 7: Source the rc file
+ Step 8: Source the rc file
--------------------------
Type or copy/paste the following to source the novarc file in your current working directory.
@ -125,7 +137,7 @@ Type or copy/paste the following to source the novarc file in your current worki
. novarc
- Step 8: Pat yourself on the back :)
+ Step 9: Pat yourself on the back :)
-----------------------------------
Congratulations, your cloud is up and running, youve created an admin user, retrieved the user's credentials and put them in your environment.
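Once novarc has been sourced, the exported ``EC2_ACCESS_KEY`` and ``EC2_SECRET_KEY`` can also be used programmatically against Nova's EC2-style API, not just through euca2ools. The snippet below is a hedged sketch: the endpoint host, port and path are assumptions about a typical deployment of this era, so adjust them to your own setup::

    import os

    import boto
    from boto.ec2.regioninfo import RegionInfo

    # Assumed endpoint details; your cloud controller's address will differ.
    conn = boto.connect_ec2(
        aws_access_key_id=os.environ['EC2_ACCESS_KEY'],
        aws_secret_access_key=os.environ['EC2_SECRET_KEY'],
        is_secure=False,
        region=RegionInfo(None, 'nova', '192.168.1.1'),
        port=8773,
        path='/services/Cloud')

    # List the instances these credentials can see.
    for reservation in conn.get_all_instances():
        for instance in reservation.instances:
            print("%s %s" % (instance.id, instance.state))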


@ -21,7 +21,7 @@
Cloudpipe -- Per Project Vpns
=============================
- Cloudpipe is a method for connecting end users to their project insnances in vlan mode.
+ Cloudpipe is a method for connecting end users to their project instances in vlan mode.
Overview


@ -23,13 +23,13 @@ Nova Concepts and Introduction
Introduction
------------
- Nova is the software that controls your Infrastructure as as Service (IaaS)
+ Nova, also known as OpenStack Compute, is the software that controls your Infrastructure as as Service (IaaS)
cloud computing platform. It is similar in scope to Amazon EC2 and Rackspace
- CloudServers. Nova does not include any virtualization software, rather it
+ Cloud Servers. Nova does not include any virtualization software, rather it
defines drivers that interact with underlying virtualization mechanisms that
run on your host operating system, and exposes functionality over a web API.
- This document does not attempt to explain fundamental concepts of cloud
+ This site does not attempt to explain fundamental concepts of cloud
computing, IaaS, virtualization, or other related technologies. Instead, it
focuses on describing how Nova's implementation of those concepts is achieved.
@ -64,6 +64,19 @@ Concept: Instances
An 'instance' is a word for a virtual machine that runs inside the cloud.
+ Concept: System Architecture
+ ----------------------------
+ Nova consists of seven main components, with the Cloud Controller component representing the global state and interacting with all other components. API Server acts as the Web services front end for the cloud controller. Compute Controller provides compute server resources, and the Object Store component provides storage services. Auth Manager provides authentication and authorization services. Volume Controller provides fast and permanent block-level storage for the comput servers. Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. Scheduler selects the most suitable compute controller to host an instance.
+ .. image:: images/Novadiagram.png
+ Nova is built on a shared-nothing, messaging-based architecture. All of the major components, that is Compute Controller, Volume Controller, Network Controller, and Object Store can be run on multiple servers. Cloud Controller communicates with Object Store via HTTP (Hyper Text Transfer Protocol), but it communicates with Scheduler, Network Controller, and Volume Controller via AMQP (Advanced Message Queue Protocol). To avoid blocking each component while waiting for a response, Nova uses asynchronous calls, with a call-back that gets triggered when a response is received.
+ To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
+ .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>_`.
Concept: Storage
----------------
@ -104,9 +117,9 @@ Concept: API
Concept: Networking
-------------------
- Nova has a concept of Fixed Ips and Floating ips. Fixed ips are assigned to an instance on creation and stay the same until the instance is explicitly terminated. Floating ips are ip addresses that can be dynamically associated with an instance. This address can be disassociated and associated with another instance at any time.
+ Nova has a concept of Fixed IPs and Floating IPs. Fixed IPs are assigned to an instance on creation and stay the same until the instance is explicitly terminated. Floating ips are ip addresses that can be dynamically associated with an instance. This address can be disassociated and associated with another instance at any time.
- There are multiple strategies available for implementing fixed ips:
+ There are multiple strategies available for implementing fixed IPs:
Flat Mode
~~~~~~~~~
@ -116,7 +129,7 @@ The simplest networking mode. Each instance receives a fixed ip from the pool.
Flat DHCP Mode
~~~~~~~~~~~~~~
- This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed ips by doing a dhcpdiscover.
+ This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.
VLAN DHCP Mode
~~~~~~~~~~~~~~
@ -150,7 +163,7 @@ See doc:`nova.manage` in the Administration Guide for more details.
Concept: Flags
--------------
- python-gflags
+ Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
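To make the flag-file behaviour described above concrete, here is a generic python-gflags sketch (not code taken from Nova): a service declares the flags it needs, and the operator can supply values on the command line or in a file passed with ``--flagfile``::

    import sys

    import gflags

    FLAGS = gflags.FLAGS

    # Flags a hypothetical service might declare; the names here are examples.
    gflags.DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
                         'class name of the network manager to load')
    gflags.DEFINE_boolean('verbose', False, 'emit debug-level log output')

    if __name__ == '__main__':
        # Parses --verbose, --network_manager and any --flagfile=... argument.
        FLAGS(sys.argv)
        print('network_manager = %s' % FLAGS.network_manager)
        print('verbose = %s' % FLAGS.verbose)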
Concept: Plugins
@ -187,8 +200,17 @@ Concept: Scheduler
Concept: Security Groups
------------------------
- Security groups
+ In Nova, a security group is a named collection of network access rules, like firewall policies. These access rules specify which incoming network traffic should be delivered to all VM instances in the group, all other incoming traffic being discarded. Users can modify rules for a group at any time. The new rules are automatically enforced for all running instances and instances launched from then on.
+ When launching VM instances, the project manager specifies which security groups it wants to join. It will become a member of these specified security groups when it is launched. If no groups are specified, the instances is assigned to the default group, which by default allows all network traffic from other members of this group and discards traffic from other IP addresses and groups. If this does not meet a user's needs, the user can modify the rule settings of the default group.
+ A security group can be thought of as a security profile or a security role - it promotes the good practice of managing firewalls by role, not by machine. For example, a user could stipulate that servers with the "webapp" role must be able to connect to servers with the "mysql" role on port 3306. Going further with the security profile analogy, an instance can be launched with membership of multiple security groups - similar to a server with multiple roles. Because all rules in security groups are ACCEPT rules, it's trivial to combine them.
+ Each rule in a security group must specify the source of packets to be allowed, which can either be a subnet anywhere on the Internet (in CIDR notation, with 0.0.0./0 representing the entire Internet) or another security group. In the latter case, the source security group can be any user's group. This makes it easy to grant selective access to one user's instances from instances run by the user's friends, partners, and vendors.
+ The creation of rules with other security groups specified as sources helps users deal with dynamic IP addressing. Without this feature, the user would have had to adjust the security groups each time a new instance is launched. This practice would become cumbersome if an application running in Nova is very dynamic and elastic, for example scales up or down frequently.
+ Security groups for a VM are passed at launch time by the cloud controller to the compute node, and applied at the compute node when a VM is started.
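As a concrete illustration of these rules (using standard EC2-style client calls such as boto's, not Nova internals; the group names are hypothetical), a user could open SSH to the default group with a CIDR source and then name another group as a source instead of an address range::

    def open_example_access(conn):
        """Sketch: apply example ingress rules through an EC2-style connection.

        `conn` is a boto EC2 connection such as the one sketched earlier in
        this document; the "webapp" and "mysql" groups are made up.
        """
        # Allow SSH from the entire Internet into the default group.
        conn.authorize_security_group(group_name='default',
                                      ip_protocol='tcp',
                                      from_port=22, to_port=22,
                                      cidr_ip='0.0.0.0/0')

        # Use another security group, rather than a CIDR block, as the
        # traffic source: members of "webapp" may then reach "mysql".
        conn.create_security_group('mysql', 'database role')
        conn.create_security_group('webapp', 'web application role')
        conn.authorize_security_group(group_name='mysql',
                                      src_security_group_name='webapp')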
Concept: Certificate Authority
------------------------------


@ -1,58 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Methods for API calls to control instances via AMQP.
"""
from nova import db
from nova import flags
from nova import rpc
FLAGS = flags.FLAGS
def reboot(instance_id, context=None):
"""Reboot the given instance."""
instance_ref = db.instance_get_by_internal_id(context, instance_id)
host = instance_ref['host']
rpc.cast(context,
db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
"args": {"instance_id": instance_ref['id']}})
def rescue(instance_id, context):
"""Rescue the given instance."""
instance_ref = db.instance_get_by_internal_id(context, instance_id)
host = instance_ref['host']
rpc.cast(context,
db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "rescue_instance",
"args": {"instance_id": instance_ref['id']}})
def unrescue(instance_id, context):
"""Unrescue the given instance."""
instance_ref = db.instance_get_by_internal_id(context, instance_id)
host = instance_ref['host']
rpc.cast(context,
db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unrescue_instance",
"args": {"instance_id": instance_ref['id']}})


@ -39,9 +39,8 @@ from nova import flags
from nova import quota
from nova import rpc
from nova import utils
- from nova.compute.instance_types import INSTANCE_TYPES
- from nova.api import cloud
- from nova.image.s3 import S3ImageService
+ from nova.compute import api as compute_api
+ from nova.compute import instance_types
FLAGS = flags.FLAGS
@ -50,11 +49,6 @@ flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
InvalidInputException = exception.InvalidInputException
- class QuotaError(exception.ApiError):
- """Quota Exceeeded"""
- pass
def _gen_key(context, user_id, key_name):
"""Generate a key
@ -99,8 +93,9 @@ class CloudController(object):
""" """
def __init__(self): def __init__(self):
self.network_manager = utils.import_object(FLAGS.network_manager) self.network_manager = utils.import_object(FLAGS.network_manager)
self.compute_manager = utils.import_object(FLAGS.compute_manager) self.image_service = utils.import_object(FLAGS.image_service)
self.image_service = S3ImageService() self.compute_api = compute_api.ComputeAPI(self.network_manager,
self.image_service)
self.setup() self.setup()
def __str__(self): def __str__(self):
@ -124,10 +119,10 @@ class CloudController(object):
def _get_mpi_data(self, context, project_id):
result = {}
- for instance in db.instance_get_all_by_project(context, project_id):
+ for instance in self.compute_api.get_instances(context, project_id):
if instance['fixed_ip']:
line = '%s slots=%d' % (instance['fixed_ip']['address'],
- INSTANCE_TYPES[instance['instance_type']]['vcpus'])
+ instance['vcpus'])
key = str(instance['key_name'])
if key in result:
result[key].append(line)
@ -260,7 +255,7 @@ class CloudController(object):
return True
def describe_security_groups(self, context, group_name=None, **kwargs):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
if context.user.is_admin():
groups = db.security_group_get_all(context)
else:
@ -358,7 +353,7 @@ class CloudController(object):
return False
def revoke_security_group_ingress(self, context, group_name, **kwargs):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
@ -383,7 +378,7 @@ class CloudController(object):
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name, **kwargs):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
@ -419,7 +414,7 @@ class CloudController(object):
return source_project_id
def create_security_group(self, context, group_name, group_description):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError('group %s already exists' % group_name)
@ -443,7 +438,7 @@ class CloudController(object):
# instance_id is passed in as a list of instances
ec2_id = instance_id[0]
internal_id = ec2_id_to_internal_id(ec2_id)
- instance_ref = db.instance_get_by_internal_id(context, internal_id)
+ instance_ref = self.compute_api.get_instance(context, internal_id)
output = rpc.call(context,
'%s.%s' % (FLAGS.compute_topic,
instance_ref['host']),
@ -505,9 +500,8 @@ class CloudController(object):
if quota.allowed_volumes(context, 1, size) < 1:
logging.warn("Quota exceeeded for %s, tried to create %sG volume",
context.project_id, size)
- raise QuotaError("Volume quota exceeded. You cannot "
- "create a volume of size %s" %
- size)
+ raise quota.QuotaError("Volume quota exceeded. You cannot "
+ "create a volume of size %s" % size)
vol = {}
vol['size'] = size
vol['user_id'] = context.user.id
@ -541,7 +535,7 @@ class CloudController(object):
if volume_ref['attach_status'] == "attached": if volume_ref['attach_status'] == "attached":
raise exception.ApiError("Volume is already attached") raise exception.ApiError("Volume is already attached")
internal_id = ec2_id_to_internal_id(instance_id) internal_id = ec2_id_to_internal_id(instance_id)
instance_ref = db.instance_get_by_internal_id(context, internal_id) instance_ref = self.compute_api.get_instance(context, internal_id)
host = instance_ref['host'] host = instance_ref['host']
rpc.cast(context, rpc.cast(context,
db.queue_get_for(context, FLAGS.compute_topic, host), db.queue_get_for(context, FLAGS.compute_topic, host),
@ -619,11 +613,7 @@ class CloudController(object):
instances = db.instance_get_all_by_reservation(context,
reservation_id)
else:
- if context.user.is_admin():
- instances = db.instance_get_all(context)
- else:
- instances = db.instance_get_all_by_project(context,
- context.project_id)
+ instances = self.compute_api.get_instances(context)
for instance in instances:
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
@ -699,8 +689,8 @@ class CloudController(object):
if quota.allowed_floating_ips(context, 1) < 1:
logging.warn("Quota exceeeded for %s, tried to allocate address",
context.project_id)
- raise QuotaError("Address quota exceeded. You cannot "
+ raise quota.QuotaError("Address quota exceeded. You cannot "
"allocate any more addresses")
network_topic = self._get_network_topic(context)
public_ip = rpc.call(context,
network_topic,
@ -720,7 +710,7 @@ class CloudController(object):
def associate_address(self, context, instance_id, public_ip, **kwargs):
internal_id = ec2_id_to_internal_id(instance_id)
- instance_ref = db.instance_get_by_internal_id(context, internal_id)
+ instance_ref = self.compute_api.get_instance(context, internal_id)
fixed_address = db.instance_get_fixed_address(context,
instance_ref['id'])
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
@ -752,218 +742,49 @@ class CloudController(object):
"args": {"network_id": network_ref['id']}}) "args": {"network_id": network_ref['id']}})
return db.queue_get_for(context, FLAGS.network_topic, host) return db.queue_get_for(context, FLAGS.network_topic, host)
def _ensure_default_security_group(self, context):
try:
db.security_group_get_by_name(context,
context.project_id,
'default')
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user.id,
'project_id': context.project_id}
group = db.security_group_create(context, values)
def run_instances(self, context, **kwargs):
- instance_type = kwargs.get('instance_type', 'm1.small')
- if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError("Unknown instance type: %s",
- instance_type)
- # check quota
- max_instances = int(kwargs.get('max_count', 1))
- min_instances = int(kwargs.get('min_count', max_instances))
- num_instances = quota.allowed_instances(context,
- max_instances,
- instance_type)
- if num_instances < min_instances:
- logging.warn("Quota exceeeded for %s, tried to run %s instances",
- context.project_id, min_instances)
- raise QuotaError("Instance quota exceeded. You can only "
- "run %s more instances of this type." %
- num_instances, "InstanceLimitExceeded")
- # make sure user can access the image
- # vpn image is private so it doesn't show up on lists
- vpn = kwargs['image_id'] == FLAGS.vpn_image_id
- if not vpn:
- image = self.image_service.show(context, kwargs['image_id'])
- # FIXME(ja): if image is vpn, this breaks
- # get defaults from imagestore
- image_id = image['imageId']
- kernel_id = image.get('kernelId', FLAGS.default_kernel)
- ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
- # API parameters overrides of defaults
- kernel_id = kwargs.get('kernel_id', kernel_id)
- ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
- # make sure we have access to kernel and ramdisk
- self.image_service.show(context, kernel_id)
- self.image_service.show(context, ramdisk_id)
- logging.debug("Going to run %s instances...", num_instances)
- launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
- key_data = None
- if 'key_name' in kwargs:
- key_pair_ref = db.key_pair_get(context,
- context.user.id,
- kwargs['key_name'])
- key_data = key_pair_ref['public_key']
- security_group_arg = kwargs.get('security_group', ["default"])
- if not type(security_group_arg) is list:
- security_group_arg = [security_group_arg]
- security_groups = []
- self._ensure_default_security_group(context)
- for security_group_name in security_group_arg:
- group = db.security_group_get_by_name(context,
- context.project_id,
- security_group_name)
- security_groups.append(group['id'])
- reservation_id = utils.generate_uid('r')
- base_options = {}
- base_options['state_description'] = 'scheduling'
- base_options['image_id'] = image_id
- base_options['kernel_id'] = kernel_id
- base_options['ramdisk_id'] = ramdisk_id
- base_options['reservation_id'] = reservation_id
- base_options['key_data'] = key_data
- base_options['key_name'] = kwargs.get('key_name', None)
- base_options['user_id'] = context.user.id
- base_options['project_id'] = context.project_id
- base_options['user_data'] = kwargs.get('user_data', '')
- base_options['display_name'] = kwargs.get('display_name')
- base_options['display_description'] = kwargs.get('display_description')
- type_data = INSTANCE_TYPES[instance_type]
- base_options['instance_type'] = instance_type
- base_options['memory_mb'] = type_data['memory_mb']
- base_options['vcpus'] = type_data['vcpus']
- base_options['local_gb'] = type_data['local_gb']
- elevated = context.elevated()
- for num in range(num_instances):
- instance_ref = self.compute_manager.create_instance(context,
- security_groups,
- mac_address=utils.generate_mac(),
- launch_index=num,
- **base_options)
- inst_id = instance_ref['id']
- internal_id = instance_ref['internal_id']
- ec2_id = internal_id_to_ec2_id(internal_id)
- self.compute_manager.update_instance(context,
- inst_id,
- hostname=ec2_id)
- # TODO(vish): This probably should be done in the scheduler
- # or in compute as a call. The network should be
- # allocated after the host is assigned and setup
- # can happen at the same time.
- address = self.network_manager.allocate_fixed_ip(context,
- inst_id,
- vpn)
- network_topic = self._get_network_topic(context)
- rpc.cast(elevated,
- network_topic,
- {"method": "setup_fixed_ip",
- "args": {"address": address}})
- rpc.cast(context,
- FLAGS.scheduler_topic,
- {"method": "run_instance",
- "args": {"topic": FLAGS.compute_topic,
- "instance_id": inst_id}})
- logging.debug("Casting to scheduler for %s/%s's instance %s" %
- (context.project.name, context.user.name, inst_id))
- return self._format_run_instances(context, reservation_id)
+ max_count = int(kwargs.get('max_count', 1))
+ instances = self.compute_api.create_instances(context,
+ instance_types.get_by_type(kwargs.get('instance_type', None)),
+ kwargs['image_id'],
+ min_count=int(kwargs.get('min_count', max_count)),
+ max_count=max_count,
+ kernel_id=kwargs.get('kernel_id'),
+ ramdisk_id=kwargs.get('ramdisk_id'),
+ display_name=kwargs.get('display_name'),
+ description=kwargs.get('display_description'),
+ key_name=kwargs.get('key_name'),
+ security_group=kwargs.get('security_group'),
+ generate_hostname=internal_id_to_ec2_id)
+ return self._format_run_instances(context,
+ instances[0]['reservation_id'])
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
- instance_id is a kwarg so its name cannot be modified.
- """
- ec2_id_list = instance_id
+ instance_id is a kwarg so its name cannot be modified."""
logging.debug("Going to start terminating instances")
- for id_str in ec2_id_list:
- internal_id = ec2_id_to_internal_id(id_str)
- logging.debug("Going to try and terminate %s" % id_str)
- try:
- instance_ref = db.instance_get_by_internal_id(context,
- internal_id)
- except exception.NotFound:
- logging.warning("Instance %s was not found during terminate",
- id_str)
- continue
- if (instance_ref['state_description'] == 'terminating'):
- logging.warning("Instance %s is already being terminated",
- id_str)
- continue
- now = datetime.datetime.utcnow()
- self.compute_manager.update_instance(context,
- instance_ref['id'],
- state_description='terminating',
- state=0,
- terminated_at=now)
- # FIXME(ja): where should network deallocate occur?
- address = db.instance_get_floating_address(context,
- instance_ref['id'])
- if address:
- logging.debug("Disassociating address %s" % address)
- # NOTE(vish): Right now we don't really care if the ip is
- # disassociated. We may need to worry about
- # checking this later. Perhaps in the scheduler?
- network_topic = self._get_network_topic(context)
- rpc.cast(context,
- network_topic,
- {"method": "disassociate_floating_ip",
- "args": {"floating_address": address}})
- address = db.instance_get_fixed_address(context,
- instance_ref['id'])
- if address:
- logging.debug("Deallocating address %s" % address)
- # NOTE(vish): Currently, nothing needs to be done on the
- # network node until release. If this changes,
- # we will need to cast here.
- self.network_manager.deallocate_fixed_ip(context.elevated(),
- address)
- host = instance_ref['host']
- if host:
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "terminate_instance",
- "args": {"instance_id": instance_ref['id']}})
- else:
- db.instance_destroy(context, instance_ref['id'])
+ for ec2_id in instance_id:
+ internal_id = ec2_id_to_internal_id(ec2_id)
+ self.compute_api.delete_instance(context, internal_id)
return True
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
for ec2_id in instance_id:
internal_id = ec2_id_to_internal_id(ec2_id)
- cloud.reboot(internal_id, context=context)
+ self.compute_api.reboot(context, internal_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
internal_id = ec2_id_to_internal_id(instance_id)
- cloud.rescue(internal_id, context=context)
+ self.compute_api.rescue(context, internal_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
internal_id = ec2_id_to_internal_id(instance_id)
- cloud.unrescue(internal_id, context=context)
+ self.compute_api.unrescue(context, internal_id)
return True
def update_instance(self, context, ec2_id, **kwargs):
@ -974,7 +795,7 @@ class CloudController(object):
changes[field] = kwargs[field]
if changes:
internal_id = ec2_id_to_internal_id(ec2_id)
- inst = db.instance_get_by_internal_id(context, internal_id)
+ inst = self.compute_api.get_instance(context, internal_id)
db.instance_update(context, inst['id'], kwargs)
return True
@ -994,8 +815,11 @@ class CloudController(object):
return True
def describe_images(self, context, image_id=None, **kwargs):
- imageSet = self.image_service.index(context, image_id)
- return {'imagesSet': imageSet}
+ # Note: image_id is a list!
+ images = self.image_service.index(context)
+ if image_id:
+ images = filter(lambda x: x['imageId'] in image_id, images)
+ return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
self.image_service.deregister(context, image_id)

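The hunks above replace direct ``db.instance_*`` lookups, the local ``QuotaError`` class and the ``_ensure_default_security_group`` helper with calls into a ``nova.compute.api.ComputeAPI`` object. As a hedged sketch of the resulting calling pattern (the method names are taken from the diff; anything beyond them is an assumption), an API-layer controller now delegates like this::

    from nova.compute import api as compute_api


    class ExampleController(object):
        """Hypothetical controller showing the delegation pattern above."""

        def __init__(self):
            self.compute_api = compute_api.ComputeAPI()

        def reboot_all(self, context, internal_ids):
            # Resolve and reboot instances through the compute API layer
            # instead of touching the db module directly.
            for internal_id in internal_ids:
                instance = self.compute_api.get_instance(context, internal_id)
                self.compute_api.reboot(context, instance['internal_id'])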

@ -25,6 +25,7 @@ import time
import logging
import routes
+ import traceback
import webob.dec
import webob.exc
import webob
@ -65,6 +66,7 @@ class API(wsgi.Middleware):
return req.get_response(self.application)
except Exception as ex:
logging.warn("Caught error: %s" % str(ex))
+ logging.debug(traceback.format_exc())
exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
return faults.Fault(exc)


@ -23,10 +23,7 @@ class Context(object):
class BasicApiAuthManager(object):
""" Implements a somewhat rudimentary version of OpenStack Auth"""
- def __init__(self, host=None, db_driver=None):
- if not host:
- host = FLAGS.host
- self.host = host
+ def __init__(self, db_driver=None):
if not db_driver:
db_driver = FLAGS.db_driver
self.db = utils.import_object(db_driver)
@ -47,7 +44,7 @@ class BasicApiAuthManager(object):
except KeyError:
return faults.Fault(webob.exc.HTTPUnauthorized())
- token, user = self._authorize_user(username, key)
+ token, user = self._authorize_user(username, key, req)
if user and token:
res = webob.Response()
res.headers['X-Auth-Token'] = token.token_hash
@ -82,8 +79,13 @@ class BasicApiAuthManager(object):
return {'id': user.id}
return None
- def _authorize_user(self, username, key):
- """ Generates a new token and assigns it to a user """
+ def _authorize_user(self, username, key, req):
+ """Generates a new token and assigns it to a user.
+ username - string
+ key - string API key
+ req - webob.Request object
+ """
user = self.auth.get_user_from_access_key(key)
if user and user.name == username:
token_hash = hashlib.sha1('%s%s%f' % (username, key,
@ -91,12 +93,10 @@ class BasicApiAuthManager(object):
token_dict = {}
token_dict['token_hash'] = token_hash
token_dict['cdn_management_url'] = ''
- token_dict['server_management_url'] = self._get_server_mgmt_url()
+ # Same as auth url, e.g. http://foo.org:8774/baz/v1.0
+ token_dict['server_management_url'] = req.url
token_dict['storage_url'] = ''
token_dict['user_id'] = user.id
token = self.db.auth_create_token(self.context, token_dict)
return token, user
return None, None
- def _get_server_mgmt_url(self):
- return 'https://%s/v1.0/' % self.host


@ -47,7 +47,7 @@ class Fault(webob.exc.HTTPException):
"""Generate a WSGI response based on the exception passed to ctor.""" """Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details. # Replace the body with fault details.
code = self.wrapped_exc.status_int code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "cloudServersFault") fault_name = self._fault_names.get(code, "computeFault")
fault_data = { fault_data = {
fault_name: { fault_name: {
'code': code, 'code': code,


@ -15,34 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
- import time
- import webob
from webob import exc
- from nova import flags
- from nova import rpc
- from nova import utils
- from nova import wsgi
from nova import context
- from nova.api import cloud
+ from nova import exception
+ from nova import wsgi
from nova.api.openstack import faults
- from nova.auth import manager as auth_manager
+ from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
import nova.api.openstack
- import nova.image.service
- FLAGS = flags.FLAGS
- def _filter_params(inst_dict):
- """ Extracts all updatable parameters for a server update request """
- keys = dict(name='name', admin_pass='adminPass')
- new_attrs = {}
- for k, v in keys.items():
- if v in inst_dict:
- new_attrs[k] = inst_dict[v]
- return new_attrs
def _entity_list(entities):
@ -63,7 +46,7 @@ def _entity_detail(inst):
inst_dict = {} inst_dict = {}
mapped_keys = dict(status='state', imageId='image_id', mapped_keys = dict(status='state', imageId='image_id',
flavorId='instance_type', name='server_name', id='id') flavorId='instance_type', name='display_name', id='internal_id')
for k, v in mapped_keys.iteritems(): for k, v in mapped_keys.iteritems():
inst_dict[k] = inst[v] inst_dict[k] = inst[v]
@ -78,7 +61,7 @@ def _entity_detail(inst):
def _entity_inst(inst): def _entity_inst(inst):
""" Filters all model attributes save for id and name """ """ Filters all model attributes save for id and name """
return dict(server=dict(id=inst['id'], name=inst['server_name'])) return dict(server=dict(id=inst['internal_id'], name=inst['display_name']))
class Controller(wsgi.Controller): class Controller(wsgi.Controller):
@ -88,14 +71,10 @@ class Controller(wsgi.Controller):
'application/xml': { 'application/xml': {
"attributes": { "attributes": {
"server": ["id", "imageId", "name", "flavorId", "hostId", "server": ["id", "imageId", "name", "flavorId", "hostId",
"status", "progress", "progress"]}}} "status", "progress"]}}}
def __init__(self, db_driver=None): def __init__(self):
if not db_driver: self.compute_api = compute_api.ComputeAPI()
db_driver = FLAGS.db_driver
self.db_driver = utils.import_object(db_driver)
self.network_manager = utils.import_object(FLAGS.network_manager)
self.compute_manager = utils.import_object(FLAGS.compute_manager)
super(Controller, self).__init__() super(Controller, self).__init__()
def index(self, req): def index(self, req):
@ -113,7 +92,7 @@ class Controller(wsgi.Controller):
""" """
user_id = req.environ['nova.context']['user']['id'] user_id = req.environ['nova.context']['user']['id']
ctxt = context.RequestContext(user_id, user_id) ctxt = context.RequestContext(user_id, user_id)
instance_list = self.db_driver.instance_get_all_by_user(ctxt, user_id) instance_list = self.compute_api.get_instances(ctxt)
limited_list = nova.api.openstack.limited(instance_list, req) limited_list = nova.api.openstack.limited(instance_list, req)
res = [entity_maker(inst)['server'] for inst in limited_list] res = [entity_maker(inst)['server'] for inst in limited_list]
return _entity_list(res) return _entity_list(res)
@ -122,7 +101,7 @@ class Controller(wsgi.Controller):
""" Returns server details by server id """ """ Returns server details by server id """
user_id = req.environ['nova.context']['user']['id'] user_id = req.environ['nova.context']['user']['id']
ctxt = context.RequestContext(user_id, user_id) ctxt = context.RequestContext(user_id, user_id)
inst = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) inst = self.compute_api.get_instance(ctxt, int(id))
if inst: if inst:
if inst.user_id == user_id: if inst.user_id == user_id:
return _entity_detail(inst) return _entity_detail(inst)
@ -132,52 +111,53 @@ class Controller(wsgi.Controller):
""" Destroys a server """ """ Destroys a server """
user_id = req.environ['nova.context']['user']['id'] user_id = req.environ['nova.context']['user']['id']
ctxt = context.RequestContext(user_id, user_id) ctxt = context.RequestContext(user_id, user_id)
instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) try:
if instance and instance['user_id'] == user_id: self.compute_api.delete_instance(ctxt, int(id))
self.db_driver.instance_destroy(ctxt, id) except exception.NotFound:
return faults.Fault(exc.HTTPAccepted()) return faults.Fault(exc.HTTPNotFound())
return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted()
def create(self, req): def create(self, req):
""" Creates a new server for a given user """ """ Creates a new server for a given user """
env = self._deserialize(req.body, req) env = self._deserialize(req.body, req)
if not env: if not env:
return faults.Fault(exc.HTTPUnprocessableEntity()) return faults.Fault(exc.HTTPUnprocessableEntity())
#try:
inst = self._build_server_instance(req, env)
#except Exception, e:
# return faults.Fault(exc.HTTPUnprocessableEntity())
user_id = req.environ['nova.context']['user']['id'] user_id = req.environ['nova.context']['user']['id']
rpc.cast(context.RequestContext(user_id, user_id), ctxt = context.RequestContext(user_id, user_id)
FLAGS.compute_topic, key_pair = auth_manager.AuthManager.get_key_pairs(ctxt)[0]
{"method": "run_instance", instances = self.compute_api.create_instances(ctxt,
"args": {"instance_id": inst['id']}}) instance_types.get_by_flavor_id(env['server']['flavorId']),
return _entity_inst(inst) env['server']['imageId'],
display_name=env['server']['name'],
description=env['server']['name'],
key_name=key_pair['name'],
key_data=key_pair['public_key'])
return _entity_inst(instances[0])
def update(self, req, id): def update(self, req, id):
""" Updates the server name or password """ """ Updates the server name or password """
user_id = req.environ['nova.context']['user']['id'] user_id = req.environ['nova.context']['user']['id']
ctxt = context.RequestContext(user_id, user_id) ctxt = context.RequestContext(user_id, user_id)
inst_dict = self._deserialize(req.body, req) inst_dict = self._deserialize(req.body, req)
if not inst_dict: if not inst_dict:
return faults.Fault(exc.HTTPUnprocessableEntity()) return faults.Fault(exc.HTTPUnprocessableEntity())
instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) update_dict = {}
if not instance or instance.user_id != user_id: if 'adminPass' in inst_dict['server']:
return faults.Fault(exc.HTTPNotFound()) update_dict['admin_pass'] = inst_dict['server']['adminPass']
if 'name' in inst_dict['server']:
update_dict['display_name'] = inst_dict['server']['name']
self.db_driver.instance_update(ctxt, try:
int(id), self.compute_api.update_instance(ctxt, instance['id'],
_filter_params(inst_dict['server'])) **update_dict)
return faults.Fault(exc.HTTPNoContent()) except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPNoContent()
def action(self, req, id): def action(self, req, id):
""" multi-purpose method used to reboot, rebuild, and """ Multi-purpose method used to reboot, rebuild, and
resize a server """ resize a server """
user_id = req.environ['nova.context']['user']['id'] user_id = req.environ['nova.context']['user']['id']
ctxt = context.RequestContext(user_id, user_id) ctxt = context.RequestContext(user_id, user_id)
@ -185,92 +165,11 @@ class Controller(wsgi.Controller):
try: try:
reboot_type = input_dict['reboot']['type'] reboot_type = input_dict['reboot']['type']
except Exception: except Exception:
raise faults.Fault(webob.exc.HTTPNotImplemented()) raise faults.Fault(exc.HTTPNotImplemented())
inst_ref = self.db.instance_get_by_internal_id(ctxt, int(id)) try:
if not inst_ref or (inst_ref and not inst_ref.user_id == user_id): # TODO(gundlach): pass reboot_type, support soft reboot in
# virt driver
self.compute_api.reboot(ctxt, id)
except:
return faults.Fault(exc.HTTPUnprocessableEntity()) return faults.Fault(exc.HTTPUnprocessableEntity())
cloud.reboot(id) return exc.HTTPAccepted()
def _build_server_instance(self, req, env):
"""Build instance data structure and save it to the data store."""
ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
inst = {}
user_id = req.environ['nova.context']['user']['id']
ctxt = context.RequestContext(user_id, user_id)
flavor_id = env['server']['flavorId']
instance_type, flavor = [(k, v) for k, v in
instance_types.INSTANCE_TYPES.iteritems()
if v['flavorid'] == flavor_id][0]
image_id = env['server']['imageId']
img_service = utils.import_object(FLAGS.image_service)
image = img_service.show(image_id)
if not image:
raise Exception("Image not found")
inst['server_name'] = env['server']['name']
inst['image_id'] = image_id
inst['user_id'] = user_id
inst['launch_time'] = ltime
inst['mac_address'] = utils.generate_mac()
inst['project_id'] = user_id
inst['state_description'] = 'scheduling'
inst['kernel_id'] = image.get('kernelId', FLAGS.default_kernel)
inst['ramdisk_id'] = image.get('ramdiskId', FLAGS.default_ramdisk)
inst['reservation_id'] = utils.generate_uid('r')
inst['display_name'] = env['server']['name']
inst['display_description'] = env['server']['name']
#TODO(dietz) this may be ill advised
key_pair_ref = self.db_driver.key_pair_get_all_by_user(
None, user_id)[0]
inst['key_data'] = key_pair_ref['public_key']
inst['key_name'] = key_pair_ref['name']
#TODO(dietz) stolen from ec2 api, see TODO there
inst['security_group'] = 'default'
# Flavor related attributes
inst['instance_type'] = instance_type
inst['memory_mb'] = flavor['memory_mb']
inst['vcpus'] = flavor['vcpus']
inst['local_gb'] = flavor['local_gb']
inst['mac_address'] = utils.generate_mac()
inst['launch_index'] = 0
ref = self.compute_manager.create_instance(ctxt, **inst)
inst['id'] = ref['internal_id']
inst['hostname'] = str(ref['internal_id'])
self.compute_manager.update_instance(ctxt, inst['id'], **inst)
address = self.network_manager.allocate_fixed_ip(ctxt,
inst['id'])
# TODO(vish): This probably should be done in the scheduler
# network is setup when host is assigned
network_topic = self._get_network_topic(ctxt)
rpc.call(ctxt,
network_topic,
{"method": "setup_fixed_ip",
"args": {"address": address}})
return inst
def _get_network_topic(self, context):
"""Retrieves the network host for a project"""
network_ref = self.network_manager.get_network(context)
host = network_ref['host']
if not host:
host = rpc.call(context,
FLAGS.network_topic,
{"method": "set_network_host",
"args": {"network_id": network_ref['id']}})
return self.db_driver.queue_get_for(context, FLAGS.network_topic, host)

View File

@ -40,6 +40,8 @@ flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
'OU for Users') 'OU for Users')
flags.DEFINE_boolean('ldap_user_modify_only', False,
'Modify attributes for users instead of creating/deleting')
flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com', flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com',
'OU for Projects') 'OU for Projects')
flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com', flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com',
@ -89,8 +91,7 @@ class LdapDriver(object):
def get_user(self, uid): def get_user(self, uid):
"""Retrieve user by id""" """Retrieve user by id"""
attr = self.__find_object(self.__uid_to_dn(uid), attr = self.__get_ldap_user(uid)
'(objectclass=novaUser)')
return self.__to_user(attr) return self.__to_user(attr)
def get_user_from_access_key(self, access): def get_user_from_access_key(self, access):
@ -110,7 +111,12 @@ class LdapDriver(object):
"""Retrieve list of users""" """Retrieve list of users"""
attrs = self.__find_objects(FLAGS.ldap_user_subtree, attrs = self.__find_objects(FLAGS.ldap_user_subtree,
'(objectclass=novaUser)') '(objectclass=novaUser)')
return [self.__to_user(attr) for attr in attrs] users = []
for attr in attrs:
user = self.__to_user(attr)
if user is not None:
users.append(user)
return users
def get_projects(self, uid=None): def get_projects(self, uid=None):
"""Retrieve list of projects""" """Retrieve list of projects"""
@ -125,21 +131,52 @@ class LdapDriver(object):
"""Create a user""" """Create a user"""
if self.__user_exists(name): if self.__user_exists(name):
raise exception.Duplicate("LDAP user %s already exists" % name) raise exception.Duplicate("LDAP user %s already exists" % name)
attr = [ if FLAGS.ldap_user_modify_only:
('objectclass', ['person', if self.__ldap_user_exists(name):
'organizationalPerson', # Retrieve user by name
'inetOrgPerson', user = self.__get_ldap_user(name)
'novaUser']), # Entry could be malformed, test for missing attrs.
('ou', [FLAGS.ldap_user_unit]), # Malformed entries are useless, replace attributes found.
('uid', [name]), attr = []
('sn', [name]), if 'secretKey' in user.keys():
('cn', [name]), attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
('secretKey', [secret_key]), [secret_key]))
('accessKey', [access_key]), else:
('isAdmin', [str(is_admin).upper()]), attr.append((self.ldap.MOD_ADD, 'secretKey', \
] [secret_key]))
self.conn.add_s(self.__uid_to_dn(name), attr) if 'accessKey' in user.keys():
return self.__to_user(dict(attr)) attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
[access_key]))
else:
attr.append((self.ldap.MOD_ADD, 'accessKey', \
[access_key]))
if 'isAdmin' in user.keys():
attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
[str(is_admin).upper()]))
else:
attr.append((self.ldap.MOD_ADD, 'isAdmin', \
[str(is_admin).upper()]))
self.conn.modify_s(self.__uid_to_dn(name), attr)
return self.get_user(name)
else:
raise exception.NotFound("LDAP object for %s doesn't exist"
% name)
else:
attr = [
('objectclass', ['person',
'organizationalPerson',
'inetOrgPerson',
'novaUser']),
('ou', [FLAGS.ldap_user_unit]),
('uid', [name]),
('sn', [name]),
('cn', [name]),
('secretKey', [secret_key]),
('accessKey', [access_key]),
('isAdmin', [str(is_admin).upper()]),
]
self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr))
def create_project(self, name, manager_uid, def create_project(self, name, manager_uid,
description=None, member_uids=None): description=None, member_uids=None):
@ -155,7 +192,7 @@ class LdapDriver(object):
if description is None: if description is None:
description = name description = name
members = [] members = []
if member_uids != None: if member_uids is not None:
for member_uid in member_uids: for member_uid in member_uids:
if not self.__user_exists(member_uid): if not self.__user_exists(member_uid):
raise exception.NotFound("Project can't be created " raise exception.NotFound("Project can't be created "
@ -256,7 +293,24 @@ class LdapDriver(object):
if not self.__user_exists(uid): if not self.__user_exists(uid):
raise exception.NotFound("User %s doesn't exist" % uid) raise exception.NotFound("User %s doesn't exist" % uid)
self.__remove_from_all(uid) self.__remove_from_all(uid)
self.conn.delete_s(self.__uid_to_dn(uid)) if FLAGS.ldap_user_modify_only:
# Delete attributes
attr = []
# Retrieve user by name
user = self.__get_ldap_user(uid)
if 'secretKey' in user.keys():
attr.append((self.ldap.MOD_DELETE, 'secretKey', \
user['secretKey']))
if 'accessKey' in user.keys():
attr.append((self.ldap.MOD_DELETE, 'accessKey', \
user['accessKey']))
if 'isAdmin' in user.keys():
attr.append((self.ldap.MOD_DELETE, 'isAdmin', \
user['isAdmin']))
self.conn.modify_s(self.__uid_to_dn(uid), attr)
else:
# Delete entry
self.conn.delete_s(self.__uid_to_dn(uid))
def delete_project(self, project_id): def delete_project(self, project_id):
"""Delete a project""" """Delete a project"""
@ -265,7 +319,7 @@ class LdapDriver(object):
self.__delete_group(project_dn) self.__delete_group(project_dn)
def modify_user(self, uid, access_key=None, secret_key=None, admin=None): def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
"""Modify an existing project""" """Modify an existing user"""
if not access_key and not secret_key and admin is None: if not access_key and not secret_key and admin is None:
return return
attr = [] attr = []
@ -279,11 +333,21 @@ class LdapDriver(object):
def __user_exists(self, uid): def __user_exists(self, uid):
"""Check if user exists""" """Check if user exists"""
return self.get_user(uid) != None return self.get_user(uid) is not None
def __ldap_user_exists(self, uid):
"""Check if the user exists in ldap"""
return self.__get_ldap_user(uid) is not None
def __project_exists(self, project_id): def __project_exists(self, project_id):
"""Check if project exists""" """Check if project exists"""
return self.get_project(project_id) != None return self.get_project(project_id) is not None
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
attr = self.__find_object(self.__uid_to_dn(uid),
'(objectclass=novaUser)')
return attr
def __find_object(self, dn, query=None, scope=None): def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query""" """Find an object by dn and query"""
@ -330,12 +394,12 @@ class LdapDriver(object):
def __group_exists(self, dn): def __group_exists(self, dn):
"""Check if group exists""" """Check if group exists"""
return self.__find_object(dn, '(objectclass=groupOfNames)') != None return self.__find_object(dn, '(objectclass=groupOfNames)') is not None
@staticmethod @staticmethod
def __role_to_dn(role, project_id=None): def __role_to_dn(role, project_id=None):
"""Convert role to corresponding dn""" """Convert role to corresponding dn"""
if project_id == None: if project_id is None:
return FLAGS.__getitem__("ldap_%s" % role).value return FLAGS.__getitem__("ldap_%s" % role).value
else: else:
return 'cn=%s,cn=%s,%s' % (role, return 'cn=%s,cn=%s,%s' % (role,
@ -349,7 +413,7 @@ class LdapDriver(object):
raise exception.Duplicate("Group can't be created because " raise exception.Duplicate("Group can't be created because "
"group %s already exists" % name) "group %s already exists" % name)
members = [] members = []
if member_uids != None: if member_uids is not None:
for member_uid in member_uids: for member_uid in member_uids:
if not self.__user_exists(member_uid): if not self.__user_exists(member_uid):
raise exception.NotFound("Group can't be created " raise exception.NotFound("Group can't be created "
@ -375,7 +439,7 @@ class LdapDriver(object):
res = self.__find_object(group_dn, res = self.__find_object(group_dn,
'(member=%s)' % self.__uid_to_dn(uid), '(member=%s)' % self.__uid_to_dn(uid),
self.ldap.SCOPE_BASE) self.ldap.SCOPE_BASE)
return res != None return res is not None
def __add_to_group(self, uid, group_dn): def __add_to_group(self, uid, group_dn):
"""Add user to group""" """Add user to group"""
@ -447,18 +511,22 @@ class LdapDriver(object):
@staticmethod @staticmethod
def __to_user(attr): def __to_user(attr):
"""Convert ldap attributes to User object""" """Convert ldap attributes to User object"""
if attr == None: if attr is None:
return None
if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \
and 'isAdmin' in attr.keys()):
return {
'id': attr['uid'][0],
'name': attr['cn'][0],
'access': attr['accessKey'][0],
'secret': attr['secretKey'][0],
'admin': (attr['isAdmin'][0] == 'TRUE')}
else:
return None return None
return {
'id': attr['uid'][0],
'name': attr['cn'][0],
'access': attr['accessKey'][0],
'secret': attr['secretKey'][0],
'admin': (attr['isAdmin'][0] == 'TRUE')}
def __to_project(self, attr): def __to_project(self, attr):
"""Convert ldap attributes to Project object""" """Convert ldap attributes to Project object"""
if attr == None: if attr is None:
return None return None
member_dns = attr.get('member', []) member_dns = attr.get('member', [])
return { return {

View File

@ -624,6 +624,10 @@ class AuthManager(object):
with self.driver() as drv: with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin) drv.modify_user(uid, access_key, secret_key, admin)
@staticmethod
def get_key_pairs(context):
return db.key_pair_get_all_by_user(context.elevated(), context.user_id)
def get_credentials(self, user, project=None): def get_credentials(self, user, project=None):
"""Get credential zip for user in project""" """Get credential zip for user in project"""
if not isinstance(user, User): if not isinstance(user, User):

View File

@ -0,0 +1,84 @@
#
# Person object for Nova
# inetorgperson with extra attributes
# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
#
#
# using internet experimental oid arc as per BP64 3.1
objectidentifier novaSchema 1.3.6.1.3.1.666.666
objectidentifier novaAttrs novaSchema:3
objectidentifier novaOCs novaSchema:4
attributetype (
novaAttrs:1
NAME 'accessKey'
DESC 'Key for accessing data'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:2
NAME 'secretKey'
DESC 'Secret key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:3
NAME 'keyFingerprint'
DESC 'Fingerprint of private key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:4
NAME 'isAdmin'
DESC 'Is user an administrator?'
EQUALITY booleanMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
SINGLE-VALUE
)
attributetype (
novaAttrs:5
NAME 'projectManager'
DESC 'Project Managers of a project'
SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
)
objectClass (
novaOCs:1
NAME 'novaUser'
DESC 'access and secret keys'
AUXILIARY
MUST ( uid )
MAY ( accessKey $ secretKey $ isAdmin )
)
objectClass (
novaOCs:2
NAME 'novaKeyPair'
DESC 'Key pair for User'
SUP top
STRUCTURAL
MUST ( cn $ sshPublicKey $ keyFingerprint )
)
objectClass (
novaOCs:3
NAME 'novaProject'
DESC 'Container for project'
SUP groupOfNames
STRUCTURAL
MUST ( cn $ projectManager )
)

nova/auth/nova_sun.schema Normal file
View File

@ -0,0 +1,16 @@
#
# Person object for Nova
# inetorgperson with extra attributes
# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
# Modified for strict RFC 4512 compatibility by: Ryan Lane <ryan@ryandlane.com>
#
# using internet experimental oid arc as per BP64 3.1
dn: cn=schema
attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE)
attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )
objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) )
objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) )
objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) )

nova/auth/opendj.sh Executable file
View File

@ -0,0 +1,119 @@
#!/usr/bin/env bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# LDAP INSTALL SCRIPT - IS IDEMPOTENT, does not scrub users
apt-get install -y ldap-utils python-ldap openjdk-6-jre
if [ ! -d "/usr/opendj" ]
then
# TODO(rlane): Wikimedia Foundation is the current package maintainer.
# After the package is included in Ubuntu's channel, change this.
wget http://apt.wikimedia.org/wikimedia/pool/main/o/opendj/opendj_2.4.0-7_amd64.deb
dpkg -i opendj_2.4.0-7_amd64.deb
fi
abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
schemapath='/var/opendj/instance/config/schema'
cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif
cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif
chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif
chown opendj:opendj $schemapath/98-nova_sun.ldif
cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
# LDAP Client Settings
URI ldap://localhost
BASE dc=example,dc=com
BINDDN cn=Directory Manager
SIZELIMIT 0
TIMELIMIT 0
LDAP_CONF_EOF
cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF
# This is the root of the directory tree
dn: dc=example,dc=com
description: Example.Com, your trusted non-existent corporation.
dc: example
o: Example.Com
objectClass: top
objectClass: dcObject
objectClass: organization
# Subtree for users
dn: ou=Users,dc=example,dc=com
ou: Users
description: Users
objectClass: organizationalUnit
# Subtree for groups
dn: ou=Groups,dc=example,dc=com
ou: Groups
description: Groups
objectClass: organizationalUnit
# Subtree for system accounts
dn: ou=System,dc=example,dc=com
ou: System
description: Special accounts used by software applications.
objectClass: organizationalUnit
# Special Account for Authentication:
dn: uid=authenticate,ou=System,dc=example,dc=com
uid: authenticate
ou: System
description: Special account for authenticating users
userPassword: {MD5}TLnIqASP0CKUR3/LGkEZGg==
objectClass: account
objectClass: simpleSecurityObject
# create the sysadmin entry
dn: cn=developers,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: developers
description: IT admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=sysadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: sysadmins
description: IT admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=netadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: netadmins
description: Network admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=cloudadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: cloudadmins
description: Cloud admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=itsec,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: itsec
description: IT security users group
member: uid=admin,ou=Users,dc=example,dc=com
BASE_LDIF_EOF
/etc/init.d/opendj stop
su - opendj -c '/usr/opendj/setup -i -b "dc=example,dc=com" -l /etc/ldap/base.ldif -S -w changeme -O -n --noPropertiesFile'
/etc/init.d/opendj start

View File

@ -0,0 +1,19 @@
#
# LDAP Public Key Patch schema for use with openssh-ldappubkey
# Author: Eric AUGE <eau@phear.org>
#
# Based on the proposal of : Mark Ruijter
#
# octetString SYNTAX
attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
DESC 'MANDATORY: OpenSSH Public key'
EQUALITY octetStringMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
# printableString SYNTAX yes|no
objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
DESC 'MANDATORY: OpenSSH LPK objectclass'
MAY ( sshPublicKey $ uid )
)

View File

@ -0,0 +1,10 @@
#
# LDAP Public Key Patch schema for use with openssh-ldappubkey
# Author: Eric AUGE <eau@phear.org>
#
# Schema for Sun Directory Server.
# Based on the original schema, modified by Stefan Fischer.
#
dn: cn=schema
attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) )

View File

@ -20,115 +20,9 @@
apt-get install -y slapd ldap-utils python-ldap apt-get install -y slapd ldap-utils python-ldap
cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
# cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema
# LDAP Public Key Patch schema for use with openssh-ldappubkey cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema
# Author: Eric AUGE <eau@phear.org>
#
# Based on the proposal of : Mark Ruijter
#
# octetString SYNTAX
attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
DESC 'MANDATORY: OpenSSH Public key'
EQUALITY octetStringMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
# printableString SYNTAX yes|no
objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
DESC 'MANDATORY: OpenSSH LPK objectclass'
MAY ( sshPublicKey $ uid )
)
LPK_SCHEMA_EOF
cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF
#
# Person object for Nova
# inetorgperson with extra attributes
# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
#
#
# using internet experimental oid arc as per BP64 3.1
objectidentifier novaSchema 1.3.6.1.3.1.666.666
objectidentifier novaAttrs novaSchema:3
objectidentifier novaOCs novaSchema:4
attributetype (
novaAttrs:1
NAME 'accessKey'
DESC 'Key for accessing data'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:2
NAME 'secretKey'
DESC 'Secret key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:3
NAME 'keyFingerprint'
DESC 'Fingerprint of private key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:4
NAME 'isAdmin'
DESC 'Is user an administrator?'
EQUALITY booleanMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
SINGLE-VALUE
)
attributetype (
novaAttrs:5
NAME 'projectManager'
DESC 'Project Managers of a project'
SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
)
objectClass (
novaOCs:1
NAME 'novaUser'
DESC 'access and secret keys'
AUXILIARY
MUST ( uid )
MAY ( accessKey $ secretKey $ isAdmin )
)
objectClass (
novaOCs:2
NAME 'novaKeyPair'
DESC 'Key pair for User'
SUP top
STRUCTURAL
MUST ( cn $ sshPublicKey $ keyFingerprint )
)
objectClass (
novaOCs:3
NAME 'novaProject'
DESC 'Container for project'
SUP groupOfNames
STRUCTURAL
MUST ( cn $ projectManager )
)
NOVA_SCHEMA_EOF
mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF

nova/compute/api.py Normal file
View File

@ -0,0 +1,305 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all API requests relating to instances (guest vms).
"""
import datetime
import logging
import time
from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import rpc
from nova import utils
from nova.compute import instance_types
from nova.db import base
FLAGS = flags.FLAGS
def generate_default_hostname(internal_id):
"""Default function to generate a hostname given an instance reference."""
return str(internal_id)
class ComputeAPI(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, network_manager=None, image_service=None, **kwargs):
if not network_manager:
network_manager = utils.import_object(FLAGS.network_manager)
self.network_manager = network_manager
if not image_service:
image_service = utils.import_object(FLAGS.image_service)
self.image_service = image_service
super(ComputeAPI, self).__init__(**kwargs)
def create_instances(self, context, instance_type, image_id, min_count=1,
max_count=1, kernel_id=None, ramdisk_id=None,
display_name='', description='', key_name=None,
key_data=None, security_group='default',
generate_hostname=generate_default_hostname):
"""Create the number of instances requested if quote and
other arguments check out ok."""
num_instances = quota.allowed_instances(context, max_count,
instance_type)
if num_instances < min_count:
logging.warn("Quota exceeeded for %s, tried to run %s instances",
context.project_id, min_count)
raise quota.QuotaError("Instance quota exceeded. You can only "
"run %s more instances of this type." %
num_instances, "InstanceLimitExceeded")
is_vpn = image_id == FLAGS.vpn_image_id
if not is_vpn:
image = self.image_service.show(context, image_id)
if kernel_id is None:
kernel_id = image.get('kernelId', FLAGS.default_kernel)
if ramdisk_id is None:
ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
# Make sure we have access to kernel and ramdisk
self.image_service.show(context, kernel_id)
self.image_service.show(context, ramdisk_id)
if security_group is None:
security_group = ['default']
if not type(security_group) is list:
security_group = [security_group]
security_groups = []
self.ensure_default_security_group(context)
for security_group_name in security_group:
group = db.security_group_get_by_name(context,
context.project_id,
security_group_name)
security_groups.append(group['id'])
if key_data is None and key_name:
key_pair = db.key_pair_get(context, context.user_id, key_name)
key_data = key_pair['public_key']
type_data = instance_types.INSTANCE_TYPES[instance_type]
base_options = {
'reservation_id': utils.generate_uid('r'),
'image_id': image_id,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'state_description': 'scheduling',
'user_id': context.user_id,
'project_id': context.project_id,
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'instance_type': instance_type,
'memory_mb': type_data['memory_mb'],
'vcpus': type_data['vcpus'],
'local_gb': type_data['local_gb'],
'display_name': display_name,
'display_description': description,
'key_name': key_name,
'key_data': key_data}
elevated = context.elevated()
instances = []
logging.debug("Going to run %s instances...", num_instances)
for num in range(num_instances):
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
**base_options)
instance = self.db.instance_create(context, instance)
instance_id = instance['id']
internal_id = instance['internal_id']
elevated = context.elevated()
if not security_groups:
security_groups = []
for security_group_id in security_groups:
self.db.instance_add_security_group(elevated,
instance_id,
security_group_id)
# Set sane defaults if not specified
updates = dict(hostname=generate_hostname(internal_id))
if 'display_name' not in instance:
updates['display_name'] = "Server %s" % internal_id
instance = self.update_instance(context, instance_id, **updates)
instances.append(instance)
# TODO(vish): This probably should be done in the scheduler
# or in compute as a call. The network should be
# allocated after the host is assigned and setup
# can happen at the same time.
address = self.network_manager.allocate_fixed_ip(context,
instance_id,
is_vpn)
rpc.cast(elevated,
self._get_network_topic(context),
{"method": "setup_fixed_ip",
"args": {"address": address}})
logging.debug("Casting to scheduler for %s/%s's instance %s",
context.project_id, context.user_id, instance_id)
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "run_instance",
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id}})
return instances
def ensure_default_security_group(self, context):
""" Create security group for the security context if it
does not already exist
:param context: the security context
"""
try:
db.security_group_get_by_name(context, context.project_id,
'default')
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
db.security_group_create(context, values)
def update_instance(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance_id: ID of the instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:retval None
"""
return self.db.instance_update(context, instance_id, kwargs)
def delete_instance(self, context, instance_id):
logging.debug("Going to try and terminate %d" % instance_id)
try:
instance = self.db.instance_get_by_internal_id(context,
instance_id)
except exception.NotFound as e:
logging.warning("Instance %d was not found during terminate",
instance_id)
raise e
if (instance['state_description'] == 'terminating'):
logging.warning("Instance %d is already being terminated",
instance_id)
return
self.update_instance(context,
instance['id'],
state_description='terminating',
state=0,
terminated_at=datetime.datetime.utcnow())
# FIXME(ja): where should network deallocate occur?
address = self.db.instance_get_floating_address(context,
instance['id'])
if address:
logging.debug("Disassociating address %s" % address)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later. Perhaps in the scheduler?
rpc.cast(context,
self._get_network_topic(context),
{"method": "disassociate_floating_ip",
"args": {"floating_address": address}})
address = self.db.instance_get_fixed_address(context, instance['id'])
if address:
logging.debug("Deallocating address %s" % address)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)
host = instance['host']
if host:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "terminate_instance",
"args": {"instance_id": instance['id']}})
else:
self.db.instance_destroy(context, instance['id'])
def get_instances(self, context, project_id=None):
"""Get all instances, possibly filtered by project ID or
user ID. If there is no filter and the context is an admin,
it will retrieve all instances in the system."""
if project_id or not context.is_admin:
if not context.project:
return self.db.instance_get_all_by_user(context,
context.user_id)
if project_id is None:
project_id = context.project_id
return self.db.instance_get_all_by_project(context, project_id)
return self.db.instance_get_all(context)
def get_instance(self, context, instance_id):
return self.db.instance_get_by_internal_id(context, instance_id)
def reboot(self, context, instance_id):
"""Reboot the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
"args": {"instance_id": instance['id']}})
def rescue(self, context, instance_id):
"""Rescue the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "rescue_instance",
"args": {"instance_id": instance['id']}})
def unrescue(self, context, instance_id):
"""Unrescue the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unrescue_instance",
"args": {"instance_id": instance['id']}})
def _get_network_topic(self, context):
"""Retrieves the network host for a project"""
network_ref = self.network_manager.get_network(context)
host = network_ref['host']
if not host:
host = rpc.call(context,
FLAGS.network_topic,
{"method": "set_network_host",
"args": {"network_id": network_ref['id']}})
return self.db.queue_get_for(context, FLAGS.network_topic, host)
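
Taken together, this module gives the OpenStack API layer a thin facade over instance lifecycle operations that casts work onto the scheduler, network, and compute queues. A rough usage sketch; the request context, image id, and key pair values below are placeholders, not part of the diff:

# Rough usage sketch of ComputeAPI; all literal values are placeholders.
from nova import context
from nova.compute import api as compute_api

ctxt = context.RequestContext('someuser', 'someproject')
api = compute_api.ComputeAPI()
instances = api.create_instances(ctxt, 'm1.tiny', 'ami-00000001',
                                 display_name='demo',
                                 key_name='mykey',
                                 key_data='ssh-rsa AAAA... demo')
internal_id = instances[0]['internal_id']
print api.get_instance(ctxt, internal_id)['state_description']
api.delete_instance(ctxt, internal_id)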

View File

@ -21,9 +21,29 @@
The built-in instance properties. The built-in instance properties.
""" """
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
INSTANCE_TYPES = { INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1), 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2), 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3), 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4), 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)} 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
def get_by_type(instance_type):
"""Build instance data structure and save it to the data store."""
if instance_type is None:
return FLAGS.default_instance_type
if instance_type not in INSTANCE_TYPES:
raise exception.ApiError("Unknown instance type: %s",
instance_type)
return instance_type
def get_by_flavor_id(flavor_id):
for instance_type, details in INSTANCE_TYPES.iteritems():
if details['flavorid'] == flavor_id:
return instance_type
return FLAGS.default_instance_type
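
A quick illustration of the two helpers (type data comes from the INSTANCE_TYPES table above; the fallbacks return FLAGS.default_instance_type):

# Illustration only: mapping between flavor ids and instance type names.
from nova.compute import instance_types

print instance_types.get_by_flavor_id(1)      # m1.tiny
print instance_types.get_by_type('m1.large')  # m1.large
print instance_types.get_by_type(None)        # FLAGS.default_instance_type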

View File

@ -22,8 +22,8 @@ Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver, building a disk image, launching it via the underlying virtualization driver,
responding to calls to check it state, attaching persistent as well as responding to calls to check its state, attaching persistent storage, and
termination. terminating it.
**Related Flags** **Related Flags**
@ -45,15 +45,15 @@ from nova import manager
from nova import utils from nova import utils
from nova.compute import power_state from nova.compute import power_state
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_string('instances_path', utils.abspath('../instances'), flags.DEFINE_string('instances_path', '$state_path/instances',
'where instances are stored on disk') 'where instances are stored on disk')
flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
'Driver to use for volume creation') 'Driver to use for controlling virtualization')
class ComputeManager(manager.Manager): class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction.""" """Manages the running instances from creation to destruction."""
def __init__(self, compute_driver=None, *args, **kwargs): def __init__(self, compute_driver=None, *args, **kwargs):
@ -84,47 +84,6 @@ class ComputeManager(manager.Manager):
"""This call passes stright through to the virtualization driver.""" """This call passes stright through to the virtualization driver."""
yield self.driver.refresh_security_group(security_group_id) yield self.driver.refresh_security_group(security_group_id)
def create_instance(self, context, security_groups=None, **kwargs):
"""Creates the instance in the datastore and returns the
new instance as a mapping
:param context: The security context
:param security_groups: list of security group ids to
attach to the instance
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
created
:retval Returns a mapping of the instance information
that has just been created
"""
instance_ref = self.db.instance_create(context, kwargs)
inst_id = instance_ref['id']
elevated = context.elevated()
if not security_groups:
security_groups = []
for security_group_id in security_groups:
self.db.instance_add_security_group(elevated,
inst_id,
security_group_id)
return instance_ref
def update_instance(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance_id: ID of the instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:retval None
"""
self.db.instance_update(context, instance_id, kwargs)
@defer.inlineCallbacks @defer.inlineCallbacks
@exception.wrap_exception @exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs): def run_instance(self, context, instance_id, **_kwargs):
@ -134,7 +93,6 @@ class ComputeManager(manager.Manager):
if instance_ref['name'] in self.driver.list_instances(): if instance_ref['name'] in self.driver.list_instances():
raise exception.Error("Instance has already been created") raise exception.Error("Instance has already been created")
logging.debug("instance %s: starting...", instance_id) logging.debug("instance %s: starting...", instance_id)
project_id = instance_ref['project_id']
self.network_manager.setup_compute_network(context, instance_id) self.network_manager.setup_compute_network(context, instance_id)
self.db.instance_update(context, self.db.instance_update(context,
instance_id, instance_id,
@ -176,7 +134,6 @@ class ComputeManager(manager.Manager):
self.db.instance_destroy(context, instance_id) self.db.instance_destroy(context, instance_id)
raise exception.Error('trying to destroy already destroyed' raise exception.Error('trying to destroy already destroyed'
' instance: %s' % instance_id) ' instance: %s' % instance_id)
yield self.driver.destroy(instance_ref) yield self.driver.destroy(instance_ref)
# TODO(ja): should we keep it in a terminated state for a bit? # TODO(ja): should we keep it in a terminated state for a bit?

View File

@ -46,7 +46,7 @@ flags.DEFINE_integer('monitoring_instances_delay', 5,
'Sleep time between updates') 'Sleep time between updates')
flags.DEFINE_integer('monitoring_instances_step', 300, flags.DEFINE_integer('monitoring_instances_step', 300,
'Interval of RRD updates') 'Interval of RRD updates')
flags.DEFINE_string('monitoring_rrd_path', '/var/nova/monitor/instances', flags.DEFINE_string('monitoring_rrd_path', '$state_path/monitor/instances',
'Location of RRD files') 'Location of RRD files')

View File

@ -40,9 +40,9 @@ from nova import flags
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA') flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('keys_path', utils.abspath('../keys'), flags.DEFINE_string('keys_path', '$state_path/keys',
'Where we keep our keys') 'Where we keep our keys')
flags.DEFINE_string('ca_path', utils.abspath('../CA'), flags.DEFINE_string('ca_path', '$state_path/CA',
'Where we keep our root CA') 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, flags.DEFINE_boolean('use_intermediate_ca', False,
'Should we use intermediate CAs for each project?') 'Should we use intermediate CAs for each project?')

nova/db/base.py Normal file
View File

@ -0,0 +1,36 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base class for classes that need modular database access.
"""
from nova import utils
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('db_driver', 'nova.db.api',
'driver to use for database access')
class Base(object):
"""DB driver is injected in the init method"""
def __init__(self, db_driver=None):
if not db_driver:
db_driver = FLAGS.db_driver
self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103
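
Any class that needs database access can now inherit from this Base instead of duplicating the driver-import boilerplate, which is how ComputeAPI and the reworked Manager below use it. A small sketch with a hypothetical subclass name:

# Sketch only: ExampleService is a hypothetical consumer of nova.db.base.
from nova.db import base

class ExampleService(base.Base):
    """Example class that gets self.db injected by Base."""
    def instance_count(self, context):
        # self.db is the driver named by FLAGS.db_driver (nova.db.api).
        return len(self.db.instance_get_all(context))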

View File

@ -530,6 +530,12 @@ def fixed_ip_update(context, address, values):
#functions between the two of them as well. #functions between the two of them as well.
@require_context @require_context
def instance_create(context, values): def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
'internal_id' is auto-generated and should not be specified.
"""
instance_ref = models.Instance() instance_ref = models.Instance()
instance_ref.update(values) instance_ref.update(values)
@ -537,7 +543,7 @@ def instance_create(context, values):
with session.begin(): with session.begin():
while instance_ref.internal_id == None: while instance_ref.internal_id == None:
# Instances have integer internal ids. # Instances have integer internal ids.
internal_id = random.randint(0, 2 ** 32 - 1) internal_id = random.randint(0, 2 ** 31 - 1)
if not instance_internal_id_exists(context, internal_id, if not instance_internal_id_exists(context, internal_id,
session=session): session=session):
instance_ref.internal_id = internal_id instance_ref.internal_id = internal_id
@ -726,6 +732,7 @@ def instance_update(context, instance_id, values):
instance_ref = instance_get(context, instance_id, session=session) instance_ref = instance_get(context, instance_id, session=session)
instance_ref.update(values) instance_ref.update(values)
instance_ref.save(session=session) instance_ref.save(session=session)
return instance_ref
def instance_add_security_group(context, instance_id, security_group_id): def instance_add_security_group(context, instance_id, security_group_id):

View File

@ -178,8 +178,6 @@ class Instance(BASE, NovaBase):
kernel_id = Column(String(255)) kernel_id = Column(String(255))
ramdisk_id = Column(String(255)) ramdisk_id = Column(String(255))
server_name = Column(String(255))
# image_id = Column(Integer, ForeignKey('images.id'), nullable=True) # image_id = Column(Integer, ForeignKey('images.id'), nullable=True)
# kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) # kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True)
# ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) # ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True)
@ -212,6 +210,7 @@ class Instance(BASE, NovaBase):
launched_at = Column(DateTime) launched_at = Column(DateTime)
terminated_at = Column(DateTime) terminated_at = Column(DateTime)
# User editable field for display in user-facing UIs
display_name = Column(String(255)) display_name = Column(String(255))
display_description = Column(String(255)) display_description = Column(String(255))

View File

@ -24,6 +24,7 @@ where they're used.
import getopt import getopt
import os import os
import socket import socket
import string
import sys import sys
import gflags import gflags
@ -38,11 +39,12 @@ class FlagValues(gflags.FlagValues):
""" """
def __init__(self): def __init__(self, extra_context=None):
gflags.FlagValues.__init__(self) gflags.FlagValues.__init__(self)
self.__dict__['__dirty'] = [] self.__dict__['__dirty'] = []
self.__dict__['__was_already_parsed'] = False self.__dict__['__was_already_parsed'] = False
self.__dict__['__stored_argv'] = [] self.__dict__['__stored_argv'] = []
self.__dict__['__extra_context'] = extra_context
def __call__(self, argv): def __call__(self, argv):
# We're doing some hacky stuff here so that we don't have to copy # We're doing some hacky stuff here so that we don't have to copy
@ -112,7 +114,7 @@ class FlagValues(gflags.FlagValues):
def ParseNewFlags(self): def ParseNewFlags(self):
if '__stored_argv' not in self.__dict__: if '__stored_argv' not in self.__dict__:
return return
new_flags = FlagValues() new_flags = FlagValues(self)
for k in self.__dict__['__dirty']: for k in self.__dict__['__dirty']:
new_flags[k] = gflags.FlagValues.__getitem__(self, k) new_flags[k] = gflags.FlagValues.__getitem__(self, k)
@ -134,9 +136,29 @@ class FlagValues(gflags.FlagValues):
def __getattr__(self, name): def __getattr__(self, name):
if self.IsDirty(name): if self.IsDirty(name):
self.ParseNewFlags() self.ParseNewFlags()
return gflags.FlagValues.__getattr__(self, name) val = gflags.FlagValues.__getattr__(self, name)
if type(val) is str:
tmpl = string.Template(val)
context = [self, self.__dict__['__extra_context']]
return tmpl.substitute(StrWrapper(context))
return val
class StrWrapper(object):
"""Wrapper around FlagValues objects
Wraps FlagValues objects for string.Template so that we're
sure to return strings."""
def __init__(self, context_objs):
self.context_objs = context_objs
def __getitem__(self, name):
for context in self.context_objs:
val = getattr(context, name, False)
if val:
return str(val)
raise KeyError(name)
FLAGS = FlagValues() FLAGS = FlagValues()
gflags.FLAGS = FLAGS gflags.FLAGS = FLAGS
gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS) gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)
@ -201,8 +223,6 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('cc_host', '127.0.0.1', 'ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
'Url to ec2 api server') 'Url to ec2 api server')
@ -222,8 +242,11 @@ DEFINE_string('vpn_key_suffix',
DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
"Top-level directory for maintaining nova's state")
DEFINE_string('sql_connection', DEFINE_string('sql_connection',
'sqlite:///%s/nova.sqlite' % os.path.abspath("./"), 'sqlite:///$state_path/nova.sqlite',
'connection string for sql database') 'connection string for sql database')
DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
@ -236,7 +259,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler') 'Manager for scheduler')
# The service to use for image search and retrieval # The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.local.LocalImageService', DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
'The service to use for retrieving and searching for images.') 'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(), DEFINE_string('host', socket.gethostname(),
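
The net effect of the template support is that string flags may now reference other flags, most notably $state_path, and the reference is expanded when the flag value is read (so sql_connection above resolves to a nova.sqlite file under the state directory). A minimal sketch of the substitution mechanism itself, using plain string.Template with an illustrative value:

# Sketch of the interpolation performed in __getattr__ above; the raw value
# and the state_path used here are illustrative.
import string

raw_value = 'sqlite:///$state_path/nova.sqlite'
print string.Template(raw_value).substitute({'state_path': '/var/lib/nova'})
# -> sqlite:////var/lib/nova/nova.sqlite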

View File

@ -59,7 +59,7 @@ class LocalImageService(service.BaseImageService):
""" """
Store the image data and return the new image id. Store the image data and return the new image id.
""" """
id = random.randint(0, 2 ** 32 - 1) id = random.randint(0, 2 ** 31 - 1)
data['id'] = id data['id'] = id
self.update(context, id, data) self.update(context, id, data)
return id return id

View File

@ -53,23 +53,19 @@ This module provides Manager, a base class for managers.
from nova import utils from nova import utils
from nova import flags from nova import flags
from nova.db import base
from twisted.internet import defer from twisted.internet import defer
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_string('db_driver', 'nova.db.api',
'driver to use for volume creation')
class Manager(object): class Manager(base.Base):
"""DB driver is injected in the init method"""
def __init__(self, host=None, db_driver=None): def __init__(self, host=None, db_driver=None):
if not host: if not host:
host = FLAGS.host host = FLAGS.host
self.host = host self.host = host
if not db_driver: super(Manager, self).__init__(db_driver)
db_driver = FLAGS.db_driver
self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103
@defer.inlineCallbacks @defer.inlineCallbacks
def periodic_tasks(self, context=None): def periodic_tasks(self, context=None):
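Editor's note: the hunk above moves db-driver injection into a shared base class. A rough sketch of the pattern with stand-in names (the real fallback goes through FLAGS.db_driver and utils.import_object in nova.db.base):

class FakeDriver(object):
    """Stand-in for what nova.db.api normally provides."""
    def instance_get(self, context, instance_id):
        return {'id': instance_id}

class BaseSketch(object):
    def __init__(self, db_driver=None):
        # nova.db.base.Base would fall back to FLAGS.db_driver here.
        self.db = db_driver or FakeDriver()

class ManagerSketch(BaseSketch):
    def __init__(self, host=None, db_driver=None):
        self.host = host or 'localhost'
        super(ManagerSketch, self).__init__(db_driver)

print(ManagerSketch().db.instance_get(None, 42))   # {'id': 42}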
View File
@ -38,14 +38,16 @@ flags.DEFINE_string('dhcpbridge_flagfile',
'/etc/nova/nova-dhcpbridge.conf', '/etc/nova/nova-dhcpbridge.conf',
'location of flagfile for dhcpbridge') 'location of flagfile for dhcpbridge')
flags.DEFINE_string('networks_path', utils.abspath('../networks'), flags.DEFINE_string('networks_path', '$state_path/networks',
'Location to keep network config files') 'Location to keep network config files')
flags.DEFINE_string('public_interface', 'vlan1', flags.DEFINE_string('public_interface', 'vlan1',
'Interface for public IP addresses') 'Interface for public IP addresses')
flags.DEFINE_string('bridge_dev', 'eth0', flags.DEFINE_string('vlan_interface', 'eth0',
'network device for bridges') 'network device for vlans')
flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'), flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
'location of nova-dhcpbridge') 'location of nova-dhcpbridge')
flags.DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server')
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
flags.DEFINE_string('routing_source_ip', '127.0.0.1', flags.DEFINE_string('routing_source_ip', '127.0.0.1',
'Public IP of network host') 'Public IP of network host')
flags.DEFINE_bool('use_nova_chains', False, flags.DEFINE_bool('use_nova_chains', False,
@ -54,14 +56,15 @@ flags.DEFINE_bool('use_nova_chains', False,
DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
def init_host(): def metadata_forward():
"""Basic networking setup goes here""" """Create forwarding rule for metadata"""
# NOTE(devcamcar): Cloud public DNAT entries, CloudPipe port
# forwarding entries and a default DNAT entry.
_confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 " _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 "
"-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT " "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT "
"--to-destination %s:%s" % (FLAGS.cc_host, FLAGS.cc_port)) "--to-destination %s:%s" % (FLAGS.cc_host, FLAGS.cc_port))
def init_host():
"""Basic networking setup goes here"""
# NOTE(devcamcar): Cloud public SNAT entries and the default # NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic. # SNAT rule for outbound traffic.
_confirm_rule("POSTROUTING", "-t nat -s %s " _confirm_rule("POSTROUTING", "-t nat -s %s "
@ -134,7 +137,7 @@ def ensure_vlan(vlan_num):
if not _device_exists(interface): if not _device_exists(interface):
logging.debug("Starting VLAN inteface %s", interface) logging.debug("Starting VLAN inteface %s", interface)
_execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
_execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, vlan_num)) _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
_execute("sudo ifconfig %s up" % interface) _execute("sudo ifconfig %s up" % interface)
return interface return interface
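Editor's note: a sketch of the shell commands ensure_vlan() ends up issuing for a new VLAN, with assumed values (vlan_interface=eth0, vlan_num=100):

vlan_interface, vlan_num = 'eth0', 100   # assumed example values
interface = 'vlan%s' % vlan_num
commands = [
    'sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD',
    'sudo vconfig add %s %s' % (vlan_interface, vlan_num),
    'sudo ifconfig %s up' % interface,
]
print('\n'.join(commands))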
@ -142,12 +145,13 @@ def ensure_vlan(vlan_num):
def ensure_bridge(bridge, interface, net_attrs=None): def ensure_bridge(bridge, interface, net_attrs=None):
"""Create a bridge unless it already exists""" """Create a bridge unless it already exists"""
if not _device_exists(bridge): if not _device_exists(bridge):
logging.debug("Starting Bridge inteface for %s", interface) logging.debug("Starting Bridge interface for %s", interface)
_execute("sudo brctl addbr %s" % bridge) _execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge) _execute("sudo brctl setfd %s 0" % bridge)
# _execute("sudo brctl setageing %s 10" % bridge) # _execute("sudo brctl setageing %s 10" % bridge)
_execute("sudo brctl stp %s off" % bridge) _execute("sudo brctl stp %s off" % bridge)
_execute("sudo brctl addif %s %s" % (bridge, interface)) if interface:
_execute("sudo brctl addif %s %s" % (bridge, interface))
if net_attrs: if net_attrs:
_execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
(bridge, (bridge,
View File
@ -27,6 +27,7 @@ topologies. All of the network commands are issued to a subclass of
:network_driver: Driver to use for network creation :network_driver: Driver to use for network creation
:flat_network_bridge: Bridge device for simple network instances :flat_network_bridge: Bridge device for simple network instances
:flat_interface: FlatDhcp will bridge into this interface if set
:flat_network_dns: Dns for simple network :flat_network_dns: Dns for simple network
:flat_network_dhcp_start: Dhcp start for FlatDhcp :flat_network_dhcp_start: Dhcp start for FlatDhcp
:vlan_start: First VLAN for private networks :vlan_start: First VLAN for private networks
@ -63,7 +64,11 @@ flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances') 'Bridge for simple network instances')
flags.DEFINE_string('flat_network_dns', '8.8.4.4', flags.DEFINE_string('flat_network_dns', '8.8.4.4',
'Dns for simple network') 'Dns for simple network')
flags.DEFINE_string('flat_network_dhcp_start', '192.168.0.2', flags.DEFINE_bool('flat_injected', True,
'Whether to attempt to inject network setup into guest')
flags.DEFINE_string('flat_interface', None,
'FlatDhcp will bridge into this interface if set')
flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2',
'Dhcp start for FlatDhcp') 'Dhcp start for FlatDhcp')
flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support')
@ -175,9 +180,11 @@ class NetworkManager(manager.Manager):
if instance_ref['mac_address'] != mac: if instance_ref['mac_address'] != mac:
raise exception.Error("IP %s leased to bad mac %s vs %s" % raise exception.Error("IP %s leased to bad mac %s vs %s" %
(address, instance_ref['mac_address'], mac)) (address, instance_ref['mac_address'], mac))
now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context, self.db.fixed_ip_update(context,
fixed_ip_ref['address'], fixed_ip_ref['address'],
{'leased': True}) {'leased': True,
'updated_at': now})
if not fixed_ip_ref['allocated']: if not fixed_ip_ref['allocated']:
logging.warn("IP %s leased that was already deallocated", address) logging.warn("IP %s leased that was already deallocated", address)
@ -246,7 +253,31 @@ class NetworkManager(manager.Manager):
class FlatManager(NetworkManager): class FlatManager(NetworkManager):
"""Basic network where no vlans are used.""" """Basic network where no vlans are used.
FlatManager does not do any bridge or vlan creation. The user is
responsible for setting up whatever bridge is specified in
flat_network_bridge (br100 by default). This bridge needs to be created
on all compute hosts.
The idea is to create a single network for the host with a command like:
nova-manage network create 192.168.0.0/24 1 256. Creating multiple
networks for one manager is currently not supported, but could be
added by modifying allocate_fixed_ip and get_network to get a network
with new logic instead of network_get_by_bridge. Arbitrary lists of
addresses in a single network can be accomplished with manual db editing.
If flat_injected is True, the compute host will attempt to inject network
config into the guest. It attempts to modify /etc/network/interfaces and
currently only works on debian based systems. To support a wider range of
OSes, some other method may need to be devised to let the guest know which
ip it should be using so that it can configure itself. Perhaps an attached
disk or serial device with configuration info.
Metadata forwarding must be handled by the gateway, and since nova does
not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server.
"""
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool.""" """Gets a fixed ip from the pool."""
@ -285,6 +316,7 @@ class FlatManager(NetworkManager):
cidr = "%s/%s" % (fixed_net[start], significant_bits) cidr = "%s/%s" % (fixed_net[start], significant_bits)
project_net = IPy.IP(cidr) project_net = IPy.IP(cidr)
net = {} net = {}
net['bridge'] = FLAGS.flat_network_bridge
net['cidr'] = cidr net['cidr'] = cidr
net['netmask'] = str(project_net.netmask()) net['netmask'] = str(project_net.netmask())
net['gateway'] = str(project_net[1]) net['gateway'] = str(project_net[1])
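Editor's note: the address arithmetic above leans on the IPy library; a small standalone sketch (IPy assumed installed, values illustrative):

import IPy

cidr = '10.0.0.0/24'
project_net = IPy.IP(cidr)
net = {
    'bridge': 'br100',                      # FLAGS.flat_network_bridge
    'cidr': cidr,
    'netmask': str(project_net.netmask()),  # '255.255.255.0'
    'gateway': str(project_net[1]),         # '10.0.0.1'
}
print(net)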
@ -306,18 +338,36 @@ class FlatManager(NetworkManager):
def _on_set_network_host(self, context, network_id): def _on_set_network_host(self, context, network_id):
"""Called when this host becomes the host for a network.""" """Called when this host becomes the host for a network."""
net = {} net = {}
net['injected'] = True net['injected'] = FLAGS.flat_injected
net['bridge'] = FLAGS.flat_network_bridge
net['dns'] = FLAGS.flat_network_dns net['dns'] = FLAGS.flat_network_dns
self.db.network_update(context, network_id, net) self.db.network_update(context, network_id, net)
class FlatDHCPManager(NetworkManager): class FlatDHCPManager(FlatManager):
"""Flat networking with dhcp.""" """Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
It never injects network settings into the guest. Otherwise it behaves
like FlatManager.
"""
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
super(FlatDHCPManager, self).init_host()
self.driver.metadata_forward()
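Editor's note: init_host here is the usual cooperative-super hook, run the parent's setup and then layer driver calls on top. A toy illustration with stand-in names:

class FakeLinuxNetDriver(object):
    def metadata_forward(self):
        print('DNAT 169.254.169.254:80 -> API server')

class FlatSketch(object):
    driver = FakeLinuxNetDriver()

    def init_host(self):
        print('flat network setup')

class FlatDHCPSketch(FlatSketch):
    def init_host(self):
        super(FlatDHCPSketch, self).init_host()
        self.driver.metadata_forward()

FlatDHCPSketch().init_host()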
def setup_compute_network(self, context, instance_id):
"""Sets up matching network for compute hosts."""
network_ref = db.network_get_by_instance(context, instance_id)
self.driver.ensure_bridge(network_ref['bridge'],
FLAGS.flat_interface,
network_ref)
def setup_fixed_ip(self, context, address): def setup_fixed_ip(self, context, address):
"""Setup dhcp for this network.""" """Setup dhcp for this network."""
network_ref = db.fixed_ip_get_by_address(context, address) network_ref = db.fixed_ip_get_network(context, address)
self.driver.update_dhcp(context, network_ref['id']) self.driver.update_dhcp(context, network_ref['id'])
def deallocate_fixed_ip(self, context, address, *args, **kwargs): def deallocate_fixed_ip(self, context, address, *args, **kwargs):
@ -326,18 +376,28 @@ class FlatDHCPManager(NetworkManager):
def _on_set_network_host(self, context, network_id): def _on_set_network_host(self, context, network_id):
"""Called when this host becomes the host for a project.""" """Called when this host becomes the host for a project."""
super(FlatDHCPManager, self)._on_set_network_host(context, network_id) net = {}
network_ref = self.db.network_get(context, network_id) net['dhcp_start'] = FLAGS.flat_network_dhcp_start
self.db.network_update(context, self.db.network_update(context, network_id, net)
network_id, network_ref = db.network_get(context, network_id)
{'dhcp_start': FLAGS.flat_network_dhcp_start})
self.driver.ensure_bridge(network_ref['bridge'], self.driver.ensure_bridge(network_ref['bridge'],
FLAGS.bridge_dev, FLAGS.flat_interface,
network_ref) network_ref)
class VlanManager(NetworkManager): class VlanManager(NetworkManager):
"""Vlan network with dhcp.""" """Vlan network with dhcp.
VlanManager is the most complicated. It will create a host-managed
vlan for each project. Each project gets its own subnet. The networks
and associated subnets are created with nova-manage using a command like:
nova-manage network create 10.0.0.0/8 3 16. This will create 3 networks
of 16 addresses from the beginning of the 10.0.0.0 range.
A dhcp server is run for each subnet, so each project will have its own.
For this mode to be useful, each project will need a vpn to access the
instances in its subnet.
"""
@defer.inlineCallbacks @defer.inlineCallbacks
def periodic_tasks(self, context=None): def periodic_tasks(self, context=None):
@ -357,6 +417,7 @@ class VlanManager(NetworkManager):
standalone service. standalone service.
""" """
super(VlanManager, self).init_host() super(VlanManager, self).init_host()
self.driver.metadata_forward()
self.driver.init_host() self.driver.init_host()
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
View File
@ -33,7 +33,7 @@ from nova.objectstore import stored
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_string('buckets_path', utils.abspath('../buckets'), flags.DEFINE_string('buckets_path', '$state_path/buckets',
'path to s3 buckets') 'path to s3 buckets')
View File
@ -39,8 +39,8 @@ from nova.objectstore import bucket
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_string('images_path', utils.abspath('../images'), flags.DEFINE_string('images_path', '$state_path/images',
'path to decrypted images') 'path to decrypted images')
class Image(object): class Image(object):
View File
@ -94,3 +94,8 @@ def allowed_floating_ips(context, num_floating_ips):
quota = get_quota(context, project_id) quota = get_quota(context, project_id)
allowed_floating_ips = quota['floating_ips'] - used_floating_ips allowed_floating_ips = quota['floating_ips'] - used_floating_ips
return min(num_floating_ips, allowed_floating_ips) return min(num_floating_ips, allowed_floating_ips)
class QuotaError(exception.ApiError):
"""Quota Exceeeded"""
pass
View File
@ -54,7 +54,6 @@ def fake_auth_init(self):
self.db = FakeAuthDatabase() self.db = FakeAuthDatabase()
self.context = Context() self.context = Context()
self.auth = FakeAuthManager() self.auth = FakeAuthManager()
self.host = 'foo'
@webob.dec.wsgify @webob.dec.wsgify
@ -68,12 +67,11 @@ def fake_wsgi(self, req):
def stub_out_key_pair_funcs(stubs): def stub_out_key_pair_funcs(stubs):
def key_pair(context, user_id): def key_pair(context, user_id):
return [dict(name='key', public_key='public_key')] return [dict(name='key', public_key='public_key')]
stubs.Set(nova.db.api, 'key_pair_get_all_by_user', stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
key_pair)
def stub_out_image_service(stubs): def stub_out_image_service(stubs):
def fake_image_show(meh, id): def fake_image_show(meh, context, id):
return dict(kernelId=1, ramdiskId=1) return dict(kernelId=1, ramdiskId=1)
stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show) stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show)
View File
@ -50,12 +50,12 @@ class APITest(unittest.TestCase):
api.application = succeed api.application = succeed
resp = Request.blank('/').get_response(api) resp = Request.blank('/').get_response(api)
self.assertFalse('cloudServersFault' in resp.body, resp.body) self.assertFalse('computeFault' in resp.body, resp.body)
self.assertEqual(resp.status_int, 200, resp.body) self.assertEqual(resp.status_int, 200, resp.body)
api.application = raise_webob_exc api.application = raise_webob_exc
resp = Request.blank('/').get_response(api) resp = Request.blank('/').get_response(api)
self.assertFalse('cloudServersFault' in resp.body, resp.body) self.assertFalse('computeFault' in resp.body, resp.body)
self.assertEqual(resp.status_int, 404, resp.body) self.assertEqual(resp.status_int, 404, resp.body)
api.application = raise_api_fault api.application = raise_api_fault
@ -65,10 +65,10 @@ class APITest(unittest.TestCase):
api.application = fail api.application = fail
resp = Request.blank('/').get_response(api) resp = Request.blank('/').get_response(api)
self.assertTrue('{"cloudServersFault' in resp.body, resp.body) self.assertTrue('{"computeFault' in resp.body, resp.body)
self.assertEqual(resp.status_int, 500, resp.body) self.assertEqual(resp.status_int, 500, resp.body)
api.application = fail api.application = fail
resp = Request.blank('/.xml').get_response(api) resp = Request.blank('/.xml').get_response(api)
self.assertTrue('<cloudServersFault' in resp.body, resp.body) self.assertTrue('<computeFault' in resp.body, resp.body)
self.assertEqual(resp.status_int, 500, resp.body) self.assertEqual(resp.status_int, 500, resp.body)
View File
@ -62,14 +62,14 @@ class Test(unittest.TestCase):
f = fakes.FakeAuthManager() f = fakes.FakeAuthManager()
f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
req = webob.Request.blank('/v1.0/') req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
req.headers['X-Auth-User'] = 'herp' req.headers['X-Auth-User'] = 'herp'
req.headers['X-Auth-Key'] = 'derp' req.headers['X-Auth-Key'] = 'derp'
result = req.get_response(nova.api.API('os')) result = req.get_response(nova.api.API('os'))
self.assertEqual(result.status, '204 No Content') self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(len(result.headers['X-Auth-Token']), 40)
self.assertEqual(result.headers['X-Server-Management-Url'], self.assertEqual(result.headers['X-Server-Management-Url'],
"https://foo/v1.0/") "http://foo/v1.0/")
self.assertEqual(result.headers['X-CDN-Management-Url'], self.assertEqual(result.headers['X-CDN-Management-Url'],
"") "")
self.assertEqual(result.headers['X-Storage-Url'], "") self.assertEqual(result.headers['X-Storage-Url'], "")
View File
@ -43,9 +43,21 @@ def return_servers(context, user_id=1):
return [stub_instance(i, user_id) for i in xrange(5)] return [stub_instance(i, user_id) for i in xrange(5)]
def return_security_group(context, instance_id, security_group_id):
pass
def instance_update(context, instance_id, kwargs):
return stub_instance(instance_id)
def instance_address(context, instance_id):
return None
def stub_instance(id, user_id=1): def stub_instance(id, user_id=1):
return Instance(id=id, state=0, image_id=10, server_name='server%s' % id, return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id,
user_id=user_id) display_name='server%s' % id, internal_id=id)
class ServersTest(unittest.TestCase): class ServersTest(unittest.TestCase):
@ -63,6 +75,13 @@ class ServersTest(unittest.TestCase):
return_server) return_server)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user', self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers) return_servers)
self.stubs.Set(nova.db.api, 'instance_add_security_group',
return_security_group)
self.stubs.Set(nova.db.api, 'instance_update', instance_update)
self.stubs.Set(nova.db.api, 'instance_get_fixed_address',
instance_address)
self.stubs.Set(nova.db.api, 'instance_get_floating_address',
instance_address)
def tearDown(self): def tearDown(self):
self.stubs.UnsetAll() self.stubs.UnsetAll()
@ -87,11 +106,11 @@ class ServersTest(unittest.TestCase):
i += 1 i += 1
def test_create_instance(self): def test_create_instance(self):
def server_update(context, id, params):
pass
def instance_create(context, inst): def instance_create(context, inst):
return {'id': 1, 'internal_id': 1} return {'id': 1, 'internal_id': 1, 'display_name': ''}
def server_update(context, id, params):
return instance_create(context, id)
def fake_method(*args, **kwargs): def fake_method(*args, **kwargs):
pass pass
View File
@ -31,6 +31,7 @@ from nova import flags
from nova import test from nova import test
from nova import utils from nova import utils
from nova.auth import manager from nova.auth import manager
from nova.compute import api as compute_api
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
@ -43,6 +44,7 @@ class ComputeTestCase(test.TrialTestCase):
self.flags(connection_type='fake', self.flags(connection_type='fake',
network_manager='nova.network.manager.FlatManager') network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager) self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_api = compute_api.ComputeAPI()
self.manager = manager.AuthManager() self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake') self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake')
@ -66,26 +68,31 @@ class ComputeTestCase(test.TrialTestCase):
inst['ami_launch_index'] = 0 inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id'] return db.instance_create(self.context, inst)['id']
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
for instance in cases:
ref = self.compute_api.create_instances(self.context,
FLAGS.default_instance_type, None, **instance)
try:
self.assertNotEqual(ref[0].display_name, None)
finally:
db.instance_destroy(self.context, ref[0]['id'])
def test_create_instance_associates_security_groups(self): def test_create_instance_associates_security_groups(self):
"""Make sure create_instance associates security groups""" """Make sure create_instances associates security groups"""
inst = {}
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
values = {'name': 'default', values = {'name': 'default',
'description': 'default', 'description': 'default',
'user_id': self.user.id, 'user_id': self.user.id,
'project_id': self.project.id} 'project_id': self.project.id}
group = db.security_group_create(self.context, values) group = db.security_group_create(self.context, values)
ref = self.compute.create_instance(self.context, ref = self.compute_api.create_instances(self.context,
security_groups=[group['id']], FLAGS.default_instance_type, None, security_group=['default'])
**inst)
# reload to get groups
instance_ref = db.instance_get(self.context, ref['id'])
try: try:
self.assertEqual(len(instance_ref['security_groups']), 1) self.assertEqual(len(ref[0]['security_groups']), 1)
finally: finally:
db.security_group_destroy(self.context, group['id']) db.security_group_destroy(self.context, group['id'])
db.instance_destroy(self.context, instance_ref['id']) db.instance_destroy(self.context, ref[0]['id'])
@defer.inlineCallbacks @defer.inlineCallbacks
def test_run_terminate(self): def test_run_terminate(self):
View File
@ -15,7 +15,6 @@
# under the License. # under the License.
import os import os
import subprocess
from nova import test from nova import test
from nova.utils import parse_mailmap, str_dict_replace from nova.utils import parse_mailmap, str_dict_replace
@ -24,18 +23,23 @@ from nova.utils import parse_mailmap, str_dict_replace
class ProjectTestCase(test.TrialTestCase): class ProjectTestCase(test.TrialTestCase):
def test_authors_up_to_date(self): def test_authors_up_to_date(self):
if os.path.exists('../.bzr'): if os.path.exists('../.bzr'):
log_cmd = subprocess.Popen(["bzr", "log", "-n0"], contributors = set()
stdout=subprocess.PIPE)
changelog = log_cmd.communicate()[0]
mailmap = parse_mailmap('../.mailmap') mailmap = parse_mailmap('../.mailmap')
contributors = set() import bzrlib.workingtree
for l in changelog.split('\n'): tree = bzrlib.workingtree.WorkingTree.open('..')
l = l.strip() tree.lock_read()
if (l.startswith('author:') or l.startswith('committer:') parents = tree.get_parent_ids()
and not l == 'committer: Tarmac'): g = tree.branch.repository.get_graph()
email = l.split(' ')[-1] for p in parents[1:]:
contributors.add(str_dict_replace(email, mailmap)) rev_ids = [r for r, _ in g.iter_ancestry(parents)
if r != "null:"]
revs = tree.branch.repository.get_revisions(rev_ids)
for r in revs:
for author in r.get_apparent_authors():
email = author.split(' ')[-1]
contributors.add(str_dict_replace(email, mailmap))
authors_file = open('../Authors', 'r').read() authors_file = open('../Authors', 'r').read()
View File
@ -94,11 +94,12 @@ class QuotaTestCase(test.TrialTestCase):
for i in range(FLAGS.quota_instances): for i in range(FLAGS.quota_instances):
instance_id = self._create_instance() instance_id = self._create_instance()
instance_ids.append(instance_id) instance_ids.append(instance_id)
self.assertRaises(cloud.QuotaError, self.cloud.run_instances, self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.context, self.context,
min_count=1, min_count=1,
max_count=1, max_count=1,
instance_type='m1.small') instance_type='m1.small',
image_id='fake')
for instance_id in instance_ids: for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id) db.instance_destroy(self.context, instance_id)
@ -106,11 +107,12 @@ class QuotaTestCase(test.TrialTestCase):
instance_ids = [] instance_ids = []
instance_id = self._create_instance(cores=4) instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id) instance_ids.append(instance_id)
self.assertRaises(cloud.QuotaError, self.cloud.run_instances, self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.context, self.context,
min_count=1, min_count=1,
max_count=1, max_count=1,
instance_type='m1.small') instance_type='m1.small',
image_id='fake')
for instance_id in instance_ids: for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id) db.instance_destroy(self.context, instance_id)
@ -119,7 +121,7 @@ class QuotaTestCase(test.TrialTestCase):
for i in range(FLAGS.quota_volumes): for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume() volume_id = self._create_volume()
volume_ids.append(volume_id) volume_ids.append(volume_id)
self.assertRaises(cloud.QuotaError, self.cloud.create_volume, self.assertRaises(quota.QuotaError, self.cloud.create_volume,
self.context, self.context,
size=10) size=10)
for volume_id in volume_ids: for volume_id in volume_ids:
@ -129,7 +131,7 @@ class QuotaTestCase(test.TrialTestCase):
volume_ids = [] volume_ids = []
volume_id = self._create_volume(size=20) volume_id = self._create_volume(size=20)
volume_ids.append(volume_id) volume_ids.append(volume_id)
self.assertRaises(cloud.QuotaError, self.assertRaises(quota.QuotaError,
self.cloud.create_volume, self.cloud.create_volume,
self.context, self.context,
size=10) size=10)
@ -146,6 +148,6 @@ class QuotaTestCase(test.TrialTestCase):
# make an rpc.call, the test just finishes with OK. It # make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks # appears to be something in the magic inline callbacks
# that is breaking. # that is breaking.
self.assertRaises(cloud.QuotaError, self.cloud.allocate_address, self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
self.context) self.context)
db.floating_ip_destroy(context.get_admin_context(), address) db.floating_ip_destroy(context.get_admin_context(), address)
View File
@ -25,7 +25,7 @@ import sys
from nova import flags from nova import flags
from nova.virt import fake from nova.virt import fake
from nova.virt import libvirt_conn from nova.virt import libvirt_conn
from nova.virt import xenapi from nova.virt import xenapi_conn
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
@ -61,7 +61,7 @@ def get_connection(read_only=False):
elif t == 'libvirt': elif t == 'libvirt':
conn = libvirt_conn.get_connection(read_only) conn = libvirt_conn.get_connection(read_only)
elif t == 'xenapi': elif t == 'xenapi':
conn = xenapi.get_connection(read_only) conn = xenapi_conn.get_connection(read_only)
else: else:
raise Exception('Unknown connection type "%s"' % t) raise Exception('Unknown connection type "%s"' % t)
View File
@ -1,444 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to XenServer or Xen Cloud Platform.
The concurrency model for this class is as follows:
All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator
deferredToThread). They are remote calls, and so may hang for the usual
reasons. They should not be allowed to block the reactor thread.
All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async
(using XenAPI.VM.async_start etc). These return a task, which can then be
polled for completion. Polling is handled using reactor.callLater.
This combination of techniques means that we don't block the reactor thread at
all, and at the same time we don't hold lots of threads waiting for
long-running operations.
FIXME: get_info currently doesn't conform to these rules, and will block the
reactor thread if the VM.get_by_name_label or VM.get_record calls block.
**Related Flags**
:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
Platform (default: root).
:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
Platform.
:xenapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks (Async.VM.start, etc)
(default: 0.5).
"""
import logging
import xmlrpclib
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import task
from nova import db
from nova import flags
from nova import process
from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
XenAPI = None
FLAGS = flags.FLAGS
flags.DEFINE_string('xenapi_connection_url',
None,
'URL for connection to XenServer/Xen Cloud Platform.'
' Required if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_username',
'root',
'Username for connection to XenServer/Xen Cloud Platform.'
' Used only if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_password',
None,
'Password for connection to XenServer/Xen Cloud Platform.'
' Used only if connection_type=xenapi.')
flags.DEFINE_float('xenapi_task_poll_interval',
0.5,
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
'Suspended': power_state.SHUTDOWN, # FIXME
'Crashed': power_state.CRASHED}
def get_connection(_):
"""Note that XenAPI doesn't have a read-only connection mode, so
the read_only parameter is ignored."""
# This is loaded late so that there's no need to install this
# library when not using XenAPI.
global XenAPI
if XenAPI is None:
XenAPI = __import__('XenAPI')
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
if not url or password is None:
raise Exception('Must specify xenapi_connection_url, '
'xenapi_connection_username (optionally), and '
'xenapi_connection_password to use '
'connection_type=xenapi')
return XenAPIConnection(url, username, password)
class XenAPIConnection(object):
def __init__(self, url, user, pw):
self._conn = XenAPI.Session(url)
self._conn.login_with_password(user, pw)
def list_instances(self):
return [self._conn.xenapi.VM.get_name_label(vm) \
for vm in self._conn.xenapi.VM.get_all()]
@defer.inlineCallbacks
def spawn(self, instance):
vm = yield self._lookup(instance.name)
if vm is not None:
raise Exception('Attempted to create non-unique name %s' %
instance.name)
network = db.project_get_network(None, instance.project_id)
network_ref = \
yield self._find_network_with_bridge(network.bridge)
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
vdi_uuid = yield self._fetch_image(
instance.image_id, user, project, True)
kernel = yield self._fetch_image(
instance.kernel_id, user, project, False)
ramdisk = yield self._fetch_image(
instance.ramdisk_id, user, project, False)
vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid)
vm_ref = yield self._create_vm(instance, kernel, ramdisk)
yield self._create_vbd(vm_ref, vdi_ref, 0, True)
if network_ref:
yield self._create_vif(vm_ref, network_ref, instance.mac_address)
logging.debug('Starting VM %s...', vm_ref)
yield self._call_xenapi('VM.start', vm_ref, False, False)
logging.info('Spawning VM %s created %s.', instance.name, vm_ref)
@defer.inlineCallbacks
def _create_vm(self, instance, kernel, ramdisk):
"""Create a VM record. Returns a Deferred that gives the new
VM reference."""
instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
'name_label': instance.name,
'name_description': '',
'is_a_template': False,
'memory_static_min': '0',
'memory_static_max': mem,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
'VCPUs_at_startup': vcpus,
'VCPUs_max': vcpus,
'VCPUs_params': {},
'actions_after_shutdown': 'destroy',
'actions_after_reboot': 'restart',
'actions_after_crash': 'destroy',
'PV_bootloader': '',
'PV_kernel': kernel,
'PV_ramdisk': ramdisk,
'PV_args': 'root=/dev/xvda1',
'PV_bootloader_args': '',
'PV_legacy_args': '',
'HVM_boot_policy': '',
'HVM_boot_params': {},
'platform': {},
'PCI_bus': '',
'recommendations': '',
'affinity': '',
'user_version': '0',
'other_config': {},
}
logging.debug('Created VM %s...', instance.name)
vm_ref = yield self._call_xenapi('VM.create', rec)
logging.debug('Created VM %s as %s.', instance.name, vm_ref)
defer.returnValue(vm_ref)
@defer.inlineCallbacks
def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
vbd_rec['bootable'] = bootable
vbd_rec['mode'] = 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref)
vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec)
logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref,
vdi_ref)
defer.returnValue(vbd_ref)
@defer.inlineCallbacks
def _create_vif(self, vm_ref, network_ref, mac_address):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
vif_rec['device'] = '0'
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
vif_rec['MTU'] = '1500'
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref,
network_ref)
vif_ref = yield self._call_xenapi('VIF.create', vif_rec)
logging.debug('Created VIF %s for VM %s, network %s.', vif_ref,
vm_ref, network_ref)
defer.returnValue(vif_ref)
@defer.inlineCallbacks
def _find_network_with_bridge(self, bridge):
expr = 'field "bridge" = "%s"' % bridge
networks = yield self._call_xenapi('network.get_all_records_where',
expr)
if len(networks) == 1:
defer.returnValue(networks.keys()[0])
elif len(networks) > 1:
raise Exception('Found non-unique network for bridge %s' % bridge)
else:
raise Exception('Found no network for bridge %s' % bridge)
@defer.inlineCallbacks
def _fetch_image(self, image, user, project, use_sr):
"""use_sr: True to put the image as a VDI in an SR, False to place
it on dom0's filesystem. The former is for VM disks, the latter for
its kernel and ramdisk (if external kernels are being used).
Returns a Deferred that gives the new VDI UUID."""
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
logging.debug("Asking xapi to fetch %s as %s" % (url, access))
fn = use_sr and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = user.secret
if use_sr:
args['add_partition'] = 'true'
task = yield self._async_call_plugin('objectstore', fn, args)
uuid = yield self._wait_for_task(task)
defer.returnValue(uuid)
@defer.inlineCallbacks
def reboot(self, instance):
vm = yield self._lookup(instance.name)
if vm is None:
raise Exception('instance not present %s' % instance.name)
task = yield self._call_xenapi('Async.VM.clean_reboot', vm)
yield self._wait_for_task(task)
@defer.inlineCallbacks
def destroy(self, instance):
vm = yield self._lookup(instance.name)
if vm is None:
# Don't complain, just return. This lets us clean up instances
# that have already disappeared from the underlying platform.
defer.returnValue(None)
# Get the VDIs related to the VM
vdis = yield self._lookup_vm_vdis(vm)
try:
task = yield self._call_xenapi('Async.VM.hard_shutdown', vm)
yield self._wait_for_task(task)
except Exception, exc:
logging.warn(exc)
# Disk clean-up
if vdis:
for vdi in vdis:
try:
task = yield self._call_xenapi('Async.VDI.destroy', vdi)
yield self._wait_for_task(task)
except Exception, exc:
logging.warn(exc)
try:
task = yield self._call_xenapi('Async.VM.destroy', vm)
yield self._wait_for_task(task)
except Exception, exc:
logging.warn(exc)
def get_info(self, instance_id):
vm = self._lookup_blocking(instance_id)
if vm is None:
raise Exception('instance not present %s' % instance_id)
rec = self._conn.xenapi.VM.get_record(vm)
return {'state': XENAPI_POWER_STATE[rec['power_state']],
'max_mem': long(rec['memory_static_max']) >> 10,
'mem': long(rec['memory_dynamic_max']) >> 10,
'num_cpu': rec['VCPUs_max'],
'cpu_time': 0}
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT'
@utils.deferredToThread
def _lookup(self, i):
return self._lookup_blocking(i)
def _lookup_blocking(self, i):
vms = self._conn.xenapi.VM.get_by_name_label(i)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise Exception('duplicate name found: %s' % i)
else:
return vms[0]
@utils.deferredToThread
def _lookup_vm_vdis(self, vm):
return self._lookup_vm_vdis_blocking(vm)
def _lookup_vm_vdis_blocking(self, vm):
# Firstly we get the VBDs, then the VDIs.
# TODO: do we leave the read-only devices?
vbds = self._conn.xenapi.VM.get_VBDs(vm)
vdis = []
if vbds:
for vbd in vbds:
try:
vdi = self._conn.xenapi.VBD.get_VDI(vbd)
# Test valid VDI
record = self._conn.xenapi.VDI.get_record(vdi)
except Exception, exc:
logging.warn(exc)
else:
vdis.append(vdi)
if len(vdis) > 0:
return vdis
else:
return None
def _wait_for_task(self, task):
"""Return a Deferred that will give the result of the given task.
The task is polled until it completes."""
d = defer.Deferred()
reactor.callLater(0, self._poll_task, task, d)
return d
@utils.deferredToThread
def _poll_task(self, task, deferred):
"""Poll the given XenAPI task, and fire the given Deferred if we
get a result."""
try:
#logging.debug('Polling task %s...', task)
status = self._conn.xenapi.task.get_status(task)
if status == 'pending':
reactor.callLater(FLAGS.xenapi_task_poll_interval,
self._poll_task, task, deferred)
elif status == 'success':
result = self._conn.xenapi.task.get_result(task)
logging.info('Task %s status: success. %s', task, result)
deferred.callback(_parse_xmlrpc_value(result))
else:
error_info = self._conn.xenapi.task.get_error_info(task)
logging.warn('Task %s status: %s. %s', task, status,
error_info)
deferred.errback(XenAPI.Failure(error_info))
#logging.debug('Polling task %s done.', task)
except Exception, exc:
logging.warn(exc)
deferred.errback(exc)
@utils.deferredToThread
def _call_xenapi(self, method, *args):
"""Call the specified XenAPI method on a background thread. Returns
a Deferred for the result."""
f = self._conn.xenapi
for m in method.split('.'):
f = f.__getattr__(m)
return f(*args)
@utils.deferredToThread
def _async_call_plugin(self, plugin, fn, args):
"""Call Async.host.call_plugin on a background thread. Returns a
Deferred with the task reference."""
return _unwrap_plugin_exceptions(
self._conn.xenapi.Async.host.call_plugin,
self._get_xenapi_host(), plugin, fn, args)
def _get_xenapi_host(self):
return self._conn.xenapi.session.get_this_host(self._conn.handle)
def _unwrap_plugin_exceptions(func, *args, **kwargs):
try:
return func(*args, **kwargs)
except XenAPI.Failure, exc:
logging.debug("Got exception: %s", exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
params = None
try:
params = eval(exc.details[3])
except:
raise exc
raise XenAPI.Failure(params)
else:
raise
except xmlrpclib.ProtocolError, exc:
logging.debug("Got exception: %s", exc)
raise
def _parse_xmlrpc_value(val):
"""Parse the given value as if it were an XML-RPC value. This is
sometimes used as the format for the task.result field."""
if not val:
return val
x = xmlrpclib.loads(
'<?xml version="1.0"?><methodResponse><params><param>' +
val +
'</param></params></methodResponse>')
return x[0][0]
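Editor's note: for reference, this is the kind of task.result payload _parse_xmlrpc_value unwraps; the snippet below uses only the standard xmlrpclib module and a made-up value:

import xmlrpclib

val = '<value><string>vdi-uuid-1234</string></value>'
x = xmlrpclib.loads(
    '<?xml version="1.0"?><methodResponse><params><param>' +
    val +
    '</param></params></methodResponse>')
print(x[0][0])   # vdi-uuid-1234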
View File
@ -0,0 +1,15 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
View File
@ -0,0 +1,45 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of network
records and their attributes like bridges, PIFs, QoS, as well as
their lookup functions.
"""
from twisted.internet import defer
class NetworkHelper():
"""
The class that wraps the helper methods together.
"""
def __init__(self):
return
@classmethod
@defer.inlineCallbacks
def find_network_with_bridge(cls, session, bridge):
""" Return the network on which the bridge is attached, if found """
expr = 'field "bridge" = "%s"' % bridge
networks = yield session.call_xenapi('network.get_all_records_where',
expr)
if len(networks) == 1:
defer.returnValue(networks.keys()[0])
elif len(networks) > 1:
raise Exception('Found non-unique network for bridge %s' % bridge)
else:
raise Exception('Found no network for bridge %s' % bridge)
View File
@ -0,0 +1,216 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import logging
from twisted.internet import defer
from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
'Suspended': power_state.SHUTDOWN, # FIXME
'Crashed': power_state.CRASHED}
XenAPI = None
class VMHelper():
"""
The class that wraps the helper methods together.
"""
def __init__(self):
global XenAPI
if XenAPI is None:
XenAPI = __import__('XenAPI')
@classmethod
@defer.inlineCallbacks
def create_vm(cls, session, instance, kernel, ramdisk):
"""Create a VM record. Returns a Deferred that gives the new
VM reference."""
instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
'name_label': instance.name,
'name_description': '',
'is_a_template': False,
'memory_static_min': '0',
'memory_static_max': mem,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
'VCPUs_at_startup': vcpus,
'VCPUs_max': vcpus,
'VCPUs_params': {},
'actions_after_shutdown': 'destroy',
'actions_after_reboot': 'restart',
'actions_after_crash': 'destroy',
'PV_bootloader': '',
'PV_kernel': kernel,
'PV_ramdisk': ramdisk,
'PV_args': 'root=/dev/xvda1',
'PV_bootloader_args': '',
'PV_legacy_args': '',
'HVM_boot_policy': '',
'HVM_boot_params': {},
'platform': {},
'PCI_bus': '',
'recommendations': '',
'affinity': '',
'user_version': '0',
'other_config': {},
}
logging.debug('Created VM %s...', instance.name)
vm_ref = yield session.call_xenapi('VM.create', rec)
logging.debug('Created VM %s as %s.', instance.name, vm_ref)
defer.returnValue(vm_ref)
@classmethod
@defer.inlineCallbacks
def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
vbd_rec['bootable'] = bootable
vbd_rec['mode'] = 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref)
vbd_ref = yield session.call_xenapi('VBD.create', vbd_rec)
logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref,
vdi_ref)
defer.returnValue(vbd_ref)
@classmethod
@defer.inlineCallbacks
def create_vif(cls, session, vm_ref, network_ref, mac_address):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
vif_rec['device'] = '0'
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
vif_rec['MTU'] = '1500'
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref,
network_ref)
vif_ref = yield session.call_xenapi('VIF.create', vif_rec)
logging.debug('Created VIF %s for VM %s, network %s.', vif_ref,
vm_ref, network_ref)
defer.returnValue(vif_ref)
@classmethod
@defer.inlineCallbacks
def fetch_image(cls, session, image, user, project, use_sr):
"""use_sr: True to put the image as a VDI in an SR, False to place
it on dom0's filesystem. The former is for VM disks, the latter for
its kernel and ramdisk (if external kernels are being used).
Returns a Deferred that gives the new VDI UUID."""
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
logging.debug("Asking xapi to fetch %s as %s", url, access)
fn = use_sr and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = user.secret
if use_sr:
args['add_partition'] = 'true'
task = yield session.async_call_plugin('objectstore', fn, args)
uuid = yield session.wait_for_task(task)
defer.returnValue(uuid)
@classmethod
@utils.deferredToThread
def lookup(cls, session, i):
""" Look the instance i up, and returns it if available """
return VMHelper.lookup_blocking(session, i)
@classmethod
def lookup_blocking(cls, session, i):
""" Synchronous lookup """
vms = session.get_xenapi().VM.get_by_name_label(i)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise Exception('duplicate name found: %s' % i)
else:
return vms[0]
@classmethod
@utils.deferredToThread
def lookup_vm_vdis(cls, session, vm):
""" Look for the VDIs that are attached to the VM """
return VMHelper.lookup_vm_vdis_blocking(session, vm)
@classmethod
def lookup_vm_vdis_blocking(cls, session, vm):
""" Synchronous lookup_vm_vdis """
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbds = session.get_xenapi().VM.get_VBDs(vm)
vdis = []
if vbds:
for vbd in vbds:
try:
vdi = session.get_xenapi().VBD.get_VDI(vbd)
# Test valid VDI
record = session.get_xenapi().VDI.get_record(vdi)
logging.debug('VDI %s is still available', record['uuid'])
except XenAPI.Failure, exc:
logging.warn(exc)
else:
vdis.append(vdi)
if len(vdis) > 0:
return vdis
else:
return None
@classmethod
def compile_info(cls, record):
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
'num_cpu': record['VCPUs_max'],
'cpu_time': 0}
nova/virt/xenapi/vmops.py Normal file
View File
@ -0,0 +1,134 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM-related functions (spawn, reboot, etc).
"""
import logging
from twisted.internet import defer
from nova import db
from nova import context
from nova.auth.manager import AuthManager
from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
XenAPI = None
class VMOps(object):
"""
Management class for VM-related tasks
"""
def __init__(self, session):
global XenAPI
if XenAPI is None:
XenAPI = __import__('XenAPI')
self._session = session
def list_instances(self):
""" List VM instances """
return [self._session.get_xenapi().VM.get_name_label(vm) \
for vm in self._session.get_xenapi().VM.get_all()]
@defer.inlineCallbacks
def spawn(self, instance):
""" Create VM instance """
vm = yield VMHelper.lookup(self._session, instance.name)
if vm is not None:
raise Exception('Attempted to create non-unique name %s' %
instance.name)
bridge = db.project_get_network(context.get_admin_context(),
instance.project_id).bridge
network_ref = \
yield NetworkHelper.find_network_with_bridge(self._session, bridge)
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
vdi_uuid = yield VMHelper.fetch_image(self._session,
instance.image_id, user, project, True)
kernel = yield VMHelper.fetch_image(self._session,
instance.kernel_id, user, project, False)
ramdisk = yield VMHelper.fetch_image(self._session,
instance.ramdisk_id, user, project, False)
vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
vm_ref = yield VMHelper.create_vm(self._session,
instance, kernel, ramdisk)
yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
if network_ref:
yield VMHelper.create_vif(self._session, vm_ref,
network_ref, instance.mac_address)
logging.debug('Starting VM %s...', vm_ref)
yield self._session.call_xenapi('VM.start', vm_ref, False, False)
logging.info('Spawning VM %s created %s.', instance.name,
vm_ref)
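Editor's note: spawn() above is typical of the inlineCallbacks style used throughout the xenapi code, where each yield waits on a Deferred and defer.returnValue() hands back the result. A minimal sketch; the session object and its methods here are hypothetical stand-ins:

from twisted.internet import defer

@defer.inlineCallbacks
def fetch_and_boot(session, image_id):
    vdi_uuid = yield session.fetch_image(image_id)   # hypothetical call
    vm_ref = yield session.create_vm(vdi_uuid)       # hypothetical call
    yield session.start_vm(vm_ref)                   # hypothetical call
    defer.returnValue(vm_ref)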
@defer.inlineCallbacks
def reboot(self, instance):
""" Reboot VM instance """
instance_name = instance.name
vm = yield VMHelper.lookup(self._session, instance_name)
if vm is None:
raise Exception('instance not present %s' % instance_name)
task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm)
yield self._session.wait_for_task(task)
@defer.inlineCallbacks
def destroy(self, instance):
""" Destroy VM instance """
vm = yield VMHelper.lookup(self._session, instance.name)
if vm is None:
# Don't complain, just return. This lets us clean up instances
# that have already disappeared from the underlying platform.
defer.returnValue(None)
# Get the VDIs related to the VM
vdis = yield VMHelper.lookup_vm_vdis(self._session, vm)
try:
task = yield self._session.call_xenapi('Async.VM.hard_shutdown',
vm)
yield self._session.wait_for_task(task)
except XenAPI.Failure, exc:
logging.warn(exc)
# Disk clean-up
if vdis:
for vdi in vdis:
try:
task = yield self._session.call_xenapi('Async.VDI.destroy',
vdi)
yield self._session.wait_for_task(task)
except XenAPI.Failure, exc:
logging.warn(exc)
try:
task = yield self._session.call_xenapi('Async.VM.destroy', vm)
yield self._session.wait_for_task(task)
except XenAPI.Failure, exc:
logging.warn(exc)
def get_info(self, instance_id):
""" Return data about VM instance """
vm = VMHelper.lookup_blocking(self._session, instance_id)
if vm is None:
raise Exception('instance not present %s' % instance_id)
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_info(rec)
def get_console_output(self, instance):
""" Return snapshot of console """
# TODO: implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
View File
@ -0,0 +1,32 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
class VolumeOps(object):
def __init__(self, session):
self._session = session
def attach_volume(self, instance_name, device_path, mountpoint):
# FIXME: that's going to be sorted when iscsi-xenapi lands in branch
return True
def detach_volume(self, instance_name, mountpoint):
# FIXME: that's going to be sorted when iscsi-xenapi lands in branch
return True
nova/virt/xenapi_conn.py Normal file
View File
@ -0,0 +1,238 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to XenServer or Xen Cloud Platform.
The concurrency model for this class is as follows:
All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator
deferredToThread). They are remote calls, and so may hang for the usual
reasons. They should not be allowed to block the reactor thread.
All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async
(using XenAPI.VM.async_start etc). These return a task, which can then be
polled for completion. Polling is handled using reactor.callLater.
This combination of techniques means that we don't block the reactor thread at
all, and at the same time we don't hold lots of threads waiting for
long-running operations.
FIXME: get_info currently doesn't conform to these rules, and will block the
reactor thread if the VM.get_by_name_label or VM.get_record calls block.
**Related Flags**
:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
Platform (default: root).
:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
Platform.
:xenapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks (Async.VM.start, etc)
(default: 0.5).
"""
import logging
import xmlrpclib
from twisted.internet import defer
from twisted.internet import reactor
from nova import utils
from nova import flags
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
FLAGS = flags.FLAGS
flags.DEFINE_string('xenapi_connection_url',
None,
'URL for connection to XenServer/Xen Cloud Platform.'
' Required if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_username',
'root',
'Username for connection to XenServer/Xen Cloud Platform.'
' Used only if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_password',
None,
'Password for connection to XenServer/Xen Cloud Platform.'
' Used only if connection_type=xenapi.')
flags.DEFINE_float('xenapi_task_poll_interval',
0.5,
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
XenAPI = None
def get_connection(_):
"""Note that XenAPI doesn't have a read-only connection mode, so
the read_only parameter is ignored."""
# This is loaded late so that there's no need to install this
# library when not using XenAPI.
global XenAPI
if XenAPI is None:
XenAPI = __import__('XenAPI')
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
if not url or password is None:
raise Exception('Must specify xenapi_connection_url and '
'xenapi_connection_password to use '
'connection_type=xenapi; xenapi_connection_username '
'defaults to root')
return XenAPIConnection(url, username, password)
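A hedged usage sketch (all values below are illustrative, not part of this change): with connection_type=xenapi selected and the connection flags above supplied on the command line or in a flagfile, the compute host obtains its driver as shown; a missing url or password raises the Exception above before any XenAPI session is opened.

# e.g. --xenapi_connection_url=https://xenserver.example.com
#      --xenapi_connection_password=secret
conn = get_connection(False)  # the positional read_only argument is ignored
instances = conn.list_instances()  # delegates to VMOps.list_instances()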
class XenAPIConnection(object):
""" A connection to XenServer or Xen Cloud Platform """
def __init__(self, url, user, pw):
session = XenAPISession(url, user, pw)
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
def list_instances(self):
""" List VM instances """
return self._vmops.list_instances()
def spawn(self, instance):
""" Create VM instance """
self._vmops.spawn(instance)
def reboot(self, instance):
""" Reboot VM instance """
self._vmops.reboot(instance)
def destroy(self, instance):
""" Destroy VM instance """
self._vmops.destroy(instance)
def get_info(self, instance_id):
""" Return data about VM instance """
return self._vmops.get_info(instance_id)
def get_console_output(self, instance):
""" Return snapshot of console """
return self._vmops.get_console_output(instance)
def attach_volume(self, instance_name, device_path, mountpoint):
""" Attach volume storage to VM instance """
return self._volumeops.attach_volume(instance_name,
device_path,
mountpoint)
def detach_volume(self, instance_name, mountpoint):
""" Detach volume storage to VM instance """
return self._volumeops.detach_volume(instance_name, mountpoint)
class XenAPISession(object):
""" The session to invoke XenAPI SDK calls """
def __init__(self, url, user, pw):
self._session = XenAPI.Session(url)
self._session.login_with_password(user, pw)
def get_xenapi(self):
""" Return the xenapi object """
return self._session.xenapi
def get_xenapi_host(self):
""" Return the xenapi host """
return self._session.xenapi.session.get_this_host(self._session.handle)
@utils.deferredToThread
def call_xenapi(self, method, *args):
"""Call the specified XenAPI method on a background thread. Returns
a Deferred for the result."""
f = self._session.xenapi
for m in method.split('.'):
f = f.__getattr__(m)
return f(*args)
@utils.deferredToThread
def async_call_plugin(self, plugin, fn, args):
"""Call Async.host.call_plugin on a background thread. Returns a
Deferred with the task reference."""
return _unwrap_plugin_exceptions(
self._session.xenapi.Async.host.call_plugin,
self.get_xenapi_host(), plugin, fn, args)
def wait_for_task(self, task):
"""Return a Deferred that will give the result of the given task.
The task is polled until it completes."""
d = defer.Deferred()
reactor.callLater(0, self._poll_task, task, d)
return d
@utils.deferredToThread
def _poll_task(self, task, deferred):
"""Poll the given XenAPI task, and fire the given Deferred if we
get a result."""
try:
#logging.debug('Polling task %s...', task)
status = self._session.xenapi.task.get_status(task)
if status == 'pending':
reactor.callLater(FLAGS.xenapi_task_poll_interval,
self._poll_task, task, deferred)
elif status == 'success':
result = self._session.xenapi.task.get_result(task)
logging.info('Task %s status: success. %s', task, result)
deferred.callback(_parse_xmlrpc_value(result))
else:
error_info = self._session.xenapi.task.get_error_info(task)
logging.warn('Task %s status: %s. %s', task, status,
error_info)
deferred.errback(XenAPI.Failure(error_info))
#logging.debug('Polling task %s done.', task)
except XenAPI.Failure, exc:
logging.warn(exc)
deferred.errback(exc)
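Taken together, call_xenapi, wait_for_task and _poll_task implement the model described in the module docstring: the blocking XenAPI call runs on a worker thread, the long-running work runs in an Async task on the host, and completion is detected by reactor.callLater polling. A minimal sketch of the calling pattern the operations classes rely on (the method name and references here are assumptions, not part of this change):

from twisted.internet import defer

@defer.inlineCallbacks
def _clean_reboot(session, vm_ref):
    # Fire the asynchronous XenAPI call, then poll its task without
    # ever blocking the reactor thread.
    task = yield session.call_xenapi('Async.VM.clean_reboot', vm_ref)
    result = yield session.wait_for_task(task)
    defer.returnValue(result)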
def _unwrap_plugin_exceptions(func, *args, **kwargs):
""" Parse exception details """
try:
return func(*args, **kwargs)
except XenAPI.Failure, exc:
logging.debug("Got exception: %s", exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
params = None
try:
# details[3] holds the plugin's error parameters rendered as a
# Python literal; recover them for the re-raised Failure below.
params = eval(exc.details[3])
except:
raise exc
raise XenAPI.Failure(params)
else:
raise
except xmlrpclib.ProtocolError, exc:
logging.debug("Got exception: %s", exc)
raise
def _parse_xmlrpc_value(val):
"""Parse the given value as if it were an XML-RPC value. This is
sometimes used as the format for the task.result field."""
if not val:
return val
x = xmlrpclib.loads(
'<?xml version="1.0"?><methodResponse><params><param>' +
val +
'</param></params></methodResponse>')
return x[0][0]
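A worked example of the helper above (literals are illustrative): the value is wrapped in a fake methodResponse so xmlrpclib can decode it, while an empty result is handed back untouched.

# Not part of this change; just a check of the parsing behaviour.
assert _parse_xmlrpc_value(
    '<value><string>OpaqueRef:abc</string></value>') == 'OpaqueRef:abc'
assert _parse_xmlrpc_value('') == ''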

View File

@ -57,6 +57,7 @@ setup(name='nova',
cmdclass={ 'sdist': local_sdist,
'build_sphinx' : local_BuildDoc },
packages=find_packages(exclude=['bin', 'smoketests']),
include_package_data=True,
scripts=['bin/nova-api',
'bin/nova-compute',
'bin/nova-dhcpbridge',

View File

@ -20,3 +20,4 @@ mox==0.5.0
-f http://pymox.googlecode.com/files/mox-0.5.0.tar.gz
greenlet==0.3.1
nose nose
bzr