From 33850cb57e195e538d6e42cb6d10f8296c0d4be4 Mon Sep 17 00:00:00 2001
From: Ryan Lane
Date: Mon, 25 Oct 2010 22:42:49 +0000
Subject: [PATCH 01/83] Moving the openldap schema out of nova.sh into its own files, and adding sun (opends/opendj/sun directory server/fedora ds) schema files

---
 nova/auth/nova_openldap.schema        |  84 +++++++++++++++++++
 nova/auth/nova_sun.schema             |  16 ++++
 nova/auth/openssh-lpk_openldap.schema |  19 +++++
 nova/auth/openssh-lpk_sun.schema      |   3 +
 nova/auth/slap.sh                     | 112 +-------------------------
 5 files changed, 125 insertions(+), 109 deletions(-)
 create mode 100644 nova/auth/nova_openldap.schema
 create mode 100644 nova/auth/nova_sun.schema
 create mode 100644 nova/auth/openssh-lpk_openldap.schema
 create mode 100644 nova/auth/openssh-lpk_sun.schema

diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema
new file mode 100644
index 000000000000..4047361de481
--- /dev/null
+++ b/nova/auth/nova_openldap.schema
@@ -0,0 +1,84 @@
+#
+# Person object for Nova
+# inetorgperson with extra attributes
+# Author: Vishvananda Ishaya
+#
+#
+
+# using internet experimental oid arc as per BP64 3.1
+objectidentifier novaSchema 1.3.6.1.3.1.666.666
+objectidentifier novaAttrs novaSchema:3
+objectidentifier novaOCs novaSchema:4
+
+attributetype (
+    novaAttrs:1
+    NAME 'accessKey'
+    DESC 'Key for accessing data'
+    EQUALITY caseIgnoreMatch
+    SUBSTR caseIgnoreSubstringsMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:2
+    NAME 'secretKey'
+    DESC 'Secret key'
+    EQUALITY caseIgnoreMatch
+    SUBSTR caseIgnoreSubstringsMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:3
+    NAME 'keyFingerprint'
+    DESC 'Fingerprint of private key'
+    EQUALITY caseIgnoreMatch
+    SUBSTR caseIgnoreSubstringsMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:4
+    NAME 'isAdmin'
+    DESC 'Is user an administrator?'
+ EQUALITY booleanMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 + SINGLE-VALUE + ) + +attributetype ( + novaAttrs:5 + NAME 'projectManager' + DESC 'Project Managers of a project' + SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 + ) + +objectClass ( + novaOCs:1 + NAME 'novaUser' + DESC 'access and secret keys' + AUXILIARY + MUST ( uid ) + MAY ( accessKey $ secretKey $ isAdmin ) + ) + +objectClass ( + novaOCs:2 + NAME 'novaKeyPair' + DESC 'Key pair for User' + SUP top + STRUCTURAL + MUST ( cn $ sshPublicKey $ keyFingerprint ) + ) + +objectClass ( + novaOCs:3 + NAME 'novaProject' + DESC 'Container for project' + SUP groupOfNames + STRUCTURAL + MUST ( cn $ projectManager ) + ) diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema new file mode 100644 index 000000000000..e925e05e48be --- /dev/null +++ b/nova/auth/nova_sun.schema @@ -0,0 +1,16 @@ +# +# Person object for Nova +# inetorgperson with extra attributes +# Author: Vishvananda Ishaya +# Modified for strict RFC 4512 compatibility by: Ryan Lane +# +# using internet experimental oid arc as per BP64 3.1 +dn: cn=schema +attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) +objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) ) +objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) ) +objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) ) diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema new file mode 100644 index 000000000000..93351da6dd0e --- /dev/null +++ b/nova/auth/openssh-lpk_openldap.schema @@ -0,0 +1,19 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE +# +# Based on the proposal of : Mark Ruijter +# + + +# octetString SYNTAX +attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' + DESC 'MANDATORY: OpenSSH Public key' + EQUALITY octetStringMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) + +# printableString SYNTAX yes|no +objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY + DESC 'MANDATORY: OpenSSH LPK objectclass' + MAY ( sshPublicKey $ uid ) + ) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema new file mode 100644 index 000000000000..5b220ab06299 --- /dev/null +++ b/nova/auth/openssh-lpk_sun.schema @@ -0,0 +1,3 @@ +dn: cn=schema +attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch 
SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) +objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh index fdc0e39dc16a..797675d2e45d 100755 --- a/nova/auth/slap.sh +++ b/nova/auth/slap.sh @@ -20,115 +20,9 @@ apt-get install -y slapd ldap-utils python-ldap -cat >/etc/ldap/schema/openssh-lpk_openldap.schema < -# -# Based on the proposal of : Mark Ruijter -# - - -# octetString SYNTAX -attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' - DESC 'MANDATORY: OpenSSH Public key' - EQUALITY octetStringMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) - -# printableString SYNTAX yes|no -objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY - DESC 'MANDATORY: OpenSSH LPK objectclass' - MAY ( sshPublicKey $ uid ) - ) -LPK_SCHEMA_EOF - -cat >/etc/ldap/schema/nova.schema < -# -# - -# using internet experimental oid arc as per BP64 3.1 -objectidentifier novaSchema 1.3.6.1.3.1.666.666 -objectidentifier novaAttrs novaSchema:3 -objectidentifier novaOCs novaSchema:4 - -attributetype ( - novaAttrs:1 - NAME 'accessKey' - DESC 'Key for accessing data' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:2 - NAME 'secretKey' - DESC 'Secret key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:3 - NAME 'keyFingerprint' - DESC 'Fingerprint of private key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:4 - NAME 'isAdmin' - DESC 'Is user an administrator?' - EQUALITY booleanMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:5 - NAME 'projectManager' - DESC 'Project Managers of a project' - SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 - ) - -objectClass ( - novaOCs:1 - NAME 'novaUser' - DESC 'access and secret keys' - AUXILIARY - MUST ( uid ) - MAY ( accessKey $ secretKey $ isAdmin ) - ) - -objectClass ( - novaOCs:2 - NAME 'novaKeyPair' - DESC 'Key pair for User' - SUP top - STRUCTURAL - MUST ( cn $ sshPublicKey $ keyFingerprint ) - ) - -objectClass ( - novaOCs:3 - NAME 'novaProject' - DESC 'Container for project' - SUP groupOfNames - STRUCTURAL - MUST ( cn $ projectManager ) - ) - -NOVA_SCHEMA_EOF +abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` +cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema +cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig cat >/etc/ldap/slapd.conf < Date: Mon, 25 Oct 2010 22:50:32 +0000 Subject: [PATCH 02/83] Documentation was missing; added --- nova/auth/openssh-lpk_sun.schema | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema index 5b220ab06299..5f52db3b6511 100644 --- a/nova/auth/openssh-lpk_sun.schema +++ b/nova/auth/openssh-lpk_sun.schema @@ -1,3 +1,10 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE +# +# Schema for Sun Directory Server. +# Based on the original schema, modified by Stefan Fischer. 
+# dn: cn=schema attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) From b65b41e5957d5ded516343b3611292c9744d169f Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 4 Nov 2010 12:42:14 +0100 Subject: [PATCH 03/83] Add a templating mechanism in the flag parsing. Add a state_path flag that will be used as the top-level dir for all other state (such as images, instances, buckets, networks, etc). This way you only need to change one flag to put all your state in e.g. /var/lib/nova. --- nova/compute/manager.py | 2 +- nova/compute/monitor.py | 2 +- nova/crypto.py | 4 ++-- nova/flags.py | 22 ++++++++++++++++++++-- nova/network/linux_net.py | 2 +- nova/objectstore/bucket.py | 2 +- nova/objectstore/image.py | 4 ++-- 7 files changed, 28 insertions(+), 10 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 850cded8aa5b..65fa5043162d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -33,7 +33,7 @@ from nova.compute import power_state FLAGS = flags.FLAGS -flags.DEFINE_string('instances_path', utils.abspath('../instances'), +flags.DEFINE_string('instances_path', '$state_path/instances', 'where instances are stored on disk') flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for volume creation') diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index d0154600f368..024f3ed3c3b5 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -46,7 +46,7 @@ flags.DEFINE_integer('monitoring_instances_delay', 5, 'Sleep time between updates') flags.DEFINE_integer('monitoring_instances_step', 300, 'Interval of RRD updates') -flags.DEFINE_string('monitoring_rrd_path', '/var/nova/monitor/instances', +flags.DEFINE_string('monitoring_rrd_path', '$state_path/monitor/instances', 'Location of RRD files') diff --git a/nova/crypto.py b/nova/crypto.py index 16b4f5e1f26a..045f7f53f8b6 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -39,9 +39,9 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA') -flags.DEFINE_string('keys_path', utils.abspath('../keys'), +flags.DEFINE_string('keys_path', '$state_path/keys', 'Where we keep our keys') -flags.DEFINE_string('ca_path', utils.abspath('../CA'), +flags.DEFINE_string('ca_path', '$state_path/CA', 'Where we keep our root CA') flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?') diff --git a/nova/flags.py b/nova/flags.py index 4ae86d9b2c41..2b8bbbdb7a54 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -26,6 +26,8 @@ import os import socket import sys +from string import Template + import gflags @@ -134,8 +136,21 @@ class FlagValues(gflags.FlagValues): def __getattr__(self, name): if self.IsDirty(name): self.ParseNewFlags() - return gflags.FlagValues.__getattr__(self, name) + val = gflags.FlagValues.__getattr__(self, name) + if type(val) is str: + tmpl = Template(val) + return tmpl.substitute(StrWrapper(self)) + return val +class StrWrapper(object): + def __init__(self, obj): + self.wrapped = obj + + def __getitem__(self, name): + if hasattr(self.wrapped, name): + return str(getattr(self.wrapped, name)) + else: + raise KeyError(name) FLAGS = FlagValues() gflags.FLAGS = FLAGS @@ -218,8 
+233,11 @@ DEFINE_string('vpn_key_suffix', DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') +DEFINE_string('state_path', os.path.abspath("./"), + "Top-level directory for maintaining nova's state") + DEFINE_string('sql_connection', - 'sqlite:///%s/nova.sqlite' % os.path.abspath("./"), + 'sqlite:///$state_path/nova.sqlite', 'connection string for sql database') DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 7b323efa1500..f504b3d29b48 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -38,7 +38,7 @@ flags.DEFINE_string('dhcpbridge_flagfile', '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') -flags.DEFINE_string('networks_path', utils.abspath('../networks'), +flags.DEFINE_string('networks_path', '$state_path/networks', 'Location to keep network config files') flags.DEFINE_string('public_interface', 'vlan1', 'Interface for public IP addresses') diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py index 0ba4934d12f4..fce3ec27bc95 100644 --- a/nova/objectstore/bucket.py +++ b/nova/objectstore/bucket.py @@ -33,7 +33,7 @@ from nova.objectstore import stored FLAGS = flags.FLAGS -flags.DEFINE_string('buckets_path', utils.abspath('../buckets'), +flags.DEFINE_string('buckets_path', '$state_path/buckets', 'path to s3 buckets') diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index b7b2ec6ab580..51aef7343f14 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -39,8 +39,8 @@ from nova.objectstore import bucket FLAGS = flags.FLAGS -flags.DEFINE_string('images_path', utils.abspath('../images'), - 'path to decrypted images') +flags.DEFINE_string('images_path', '$state_path/images', + 'path to decrypted images') class Image(object): From 659058bef7913254eca63e7f67a5d74ffe146e57 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Nov 2010 00:11:45 +0000 Subject: [PATCH 04/83] fixes flatdhcp, updates nova.sh, allows for empty bridge device --- contrib/nova.sh | 28 ++++++++++++++++++++-------- nova/network/linux_net.py | 7 ++++--- nova/network/manager.py | 24 +++++++++++++++--------- 3 files changed, 39 insertions(+), 20 deletions(-) diff --git a/contrib/nova.sh b/contrib/nova.sh index 9bc36d6fbdbe..abb8c7de0203 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -17,11 +17,16 @@ if [ ! -n "$HOST_IP" ]; then # you should explicitly set HOST_IP in your environment HOST_IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` fi -TEST=0 -USE_MYSQL=0 -MYSQL_PASS=nova -USE_LDAP=0 -LIBVIRT_TYPE=qemu + +USE_MYSQL=${USE_MYSQL:-0} +MYSQL_PASS=${MYSQL_PASS:-nova} +TEST=${TEST:-0} +USE_LDAP=${USE_LDAP:-0} +LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu} +NET_MAN=${NET_MAN:-VlanManager} +# NOTE(vish): If you are using FlatDHCP make sure that this is not your +# public interface. 
You can comment it out for local usage +BRIDGE_DEV=eth0 if [ "$USE_MYSQL" == 1 ]; then SQL_CONN=mysql://root:$MYSQL_PASS@localhost/nova @@ -41,6 +46,7 @@ cat >/etc/nova/nova-manage.conf << NOVA_CONF_EOF --nodaemon --dhcpbridge_flagfile=/etc/nova/nova-manage.conf --FAKE_subdomain=ec2 +--network_manager=nova.network.manager.$NET_MAN --cc_host=$HOST_IP --routing_source_ip=$HOST_IP --sql_connection=$SQL_CONN @@ -48,6 +54,10 @@ cat >/etc/nova/nova-manage.conf << NOVA_CONF_EOF --libvirt_type=$LIBVIRT_TYPE NOVA_CONF_EOF +if [ -n "$BRIDGE_DEV" ]; then + echo "--bridge_dev=$BRIDGE_DEV" >>/etc/nova/nova-manage.conf +fi + if [ "$CMD" == "branch" ]; then sudo apt-get install -y bzr rm -rf $NOVA_DIR @@ -65,6 +75,8 @@ if [ "$CMD" == "install" ]; then sudo apt-get install -y dnsmasq open-iscsi kpartx kvm gawk iptables ebtables sudo apt-get install -y user-mode-linux kvm libvirt-bin sudo apt-get install -y screen iscsitarget euca2ools vlan curl rabbitmq-server + echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget + sudo /etc/init.d/iscsitarget restart sudo modprobe kvm sudo /etc/init.d/libvirt-bin restart sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot @@ -123,8 +135,8 @@ if [ "$CMD" == "run" ]; then $NOVA_DIR/bin/nova-manage project create admin admin # export environment variables for project 'admin' and user 'admin' $NOVA_DIR/bin/nova-manage project environment admin admin $NOVA_DIR/novarc - # create 3 small networks - $NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 3 16 + # create a small network + $NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32 # nova api crashes if we start it with a regular screen command, # so send the start command by forcing text into the window. @@ -135,7 +147,7 @@ if [ "$CMD" == "run" ]; then screen_it scheduler "$NOVA_DIR/bin/nova-scheduler --flagfile=/etc/nova/nova-manage.conf" screen_it volume "$NOVA_DIR/bin/nova-volume --flagfile=/etc/nova/nova-manage.conf" screen_it test ". 
$NOVA_DIR/novarc" - screen -x + screen -S nova -x fi if [ "$CMD" == "run" ] || [ "$CMD" == "terminate" ]; then diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 7b323efa1500..68037ed9a19e 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -42,7 +42,7 @@ flags.DEFINE_string('networks_path', utils.abspath('../networks'), 'Location to keep network config files') flags.DEFINE_string('public_interface', 'vlan1', 'Interface for public IP addresses') -flags.DEFINE_string('bridge_dev', 'eth0', +flags.DEFINE_string('bridge_dev', None, 'network device for bridges') flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'), 'location of nova-dhcpbridge') @@ -142,12 +142,13 @@ def ensure_vlan(vlan_num): def ensure_bridge(bridge, interface, net_attrs=None): """Create a bridge unless it already exists""" if not _device_exists(bridge): - logging.debug("Starting Bridge inteface for %s", interface) + logging.debug("Starting Bridge interface for %s", interface) _execute("sudo brctl addbr %s" % bridge) _execute("sudo brctl setfd %s 0" % bridge) # _execute("sudo brctl setageing %s 10" % bridge) _execute("sudo brctl stp %s off" % bridge) - _execute("sudo brctl addif %s %s" % (bridge, interface)) + if interface: + _execute("sudo brctl addif %s %s" % (bridge, interface)) if net_attrs: _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ (bridge, diff --git a/nova/network/manager.py b/nova/network/manager.py index b033bb0a4c04..96f8cf50b0b5 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -63,7 +63,7 @@ flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') -flags.DEFINE_string('flat_network_dhcp_start', '192.168.0.2', +flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2', 'Dhcp start for FlatDhcp') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') @@ -285,6 +285,7 @@ class FlatManager(NetworkManager): cidr = "%s/%s" % (fixed_net[start], significant_bits) project_net = IPy.IP(cidr) net = {} + net['bridge'] = FLAGS.flat_network_bridge net['cidr'] = cidr net['netmask'] = str(project_net.netmask()) net['gateway'] = str(project_net[1]) @@ -307,17 +308,23 @@ class FlatManager(NetworkManager): """Called when this host becomes the host for a network.""" net = {} net['injected'] = True - net['bridge'] = FLAGS.flat_network_bridge net['dns'] = FLAGS.flat_network_dns self.db.network_update(context, network_id, net) -class FlatDHCPManager(NetworkManager): +class FlatDHCPManager(FlatManager): """Flat networking with dhcp.""" + def setup_compute_network(self, context, instance_id): + """Sets up matching network for compute hosts.""" + network_ref = db.network_get_by_instance(context, instance_id) + self.driver.ensure_bridge(network_ref['bridge'], + FLAGS.bridge_dev, + network_ref) + def setup_fixed_ip(self, context, address): """Setup dhcp for this network.""" - network_ref = db.fixed_ip_get_by_address(context, address) + network_ref = db.fixed_ip_get_network(context, address) self.driver.update_dhcp(context, network_ref['id']) def deallocate_fixed_ip(self, context, address, *args, **kwargs): @@ -326,11 +333,10 @@ class FlatDHCPManager(NetworkManager): def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project.""" - super(FlatDHCPManager, self)._on_set_network_host(context, 
network_id) - network_ref = self.db.network_get(context, network_id) - self.db.network_update(context, - network_id, - {'dhcp_start': FLAGS.flat_network_dhcp_start}) + net = {} + net['dhcp_start'] = FLAGS.flat_network_dhcp_start + self.db.network_update(context, network_id, net) + network_ref = db.network_get(context, network_id) self.driver.ensure_bridge(network_ref['bridge'], FLAGS.bridge_dev, network_ref) From 0b70b44c11830549938c5153b4322b960c53963d Mon Sep 17 00:00:00 2001 From: Anne Gentle Date: Mon, 22 Nov 2010 16:43:17 -0600 Subject: [PATCH 05/83] Incorporating more networking info --- doc/source/adminguide/index.rst | 6 +++--- doc/source/adminguide/managing.networks.rst | 15 --------------- doc/source/adminguide/network.vlan.rst | 8 ++++---- doc/source/nova.concepts.rst | 21 +++++++++++++++++---- 4 files changed, 24 insertions(+), 26 deletions(-) diff --git a/doc/source/adminguide/index.rst b/doc/source/adminguide/index.rst index 51228b3191cc..736a154b216c 100644 --- a/doc/source/adminguide/index.rst +++ b/doc/source/adminguide/index.rst @@ -38,14 +38,14 @@ There are two main tools that a system administrator will find useful to manage nova.manage euca2ools -nova-manage may only be run by users with admin priviledges. euca2ools can be used by all users, though specific commands may be restricted by Role Based Access Control. You can read more about creating and managing users in :doc:`managing.users` +The nova-manage command may only be run by users with admin priviledges. Commands for euca2ools can be used by all users, though specific commands may be restricted by Role Based Access Control. You can read more about creating and managing users in :doc:`managing.users` User and Resource Management ---------------------------- -nova-manage and euca2ools provide the basic interface to perform a broad range of administration functions. In this section, you can read more about how to accomplish specific administration tasks. +The nova-manage and euca2ools commands provide the basic interface to perform a broad range of administration functions. In this section, you can read more about how to accomplish specific administration tasks. -For background on the core objects refenced in this section, see :doc:`../object.model` +For background on the core objects referenced in this section, see :doc:`../object.model` .. toctree:: :maxdepth: 1 diff --git a/doc/source/adminguide/managing.networks.rst b/doc/source/adminguide/managing.networks.rst index c8df471e84ad..b8563637e948 100644 --- a/doc/source/adminguide/managing.networks.rst +++ b/doc/source/adminguide/managing.networks.rst @@ -20,21 +20,6 @@ Networking Overview =================== In Nova, users organize their cloud resources in projects. A Nova project consists of a number of VM instances created by a user. For each VM instance, Nova assigns to it a private IP address. (Currently, Nova only supports Linux bridge networking that allows the virtual interfaces to connect to the outside network through the physical interface. Other virtual network technologies, such as Open vSwitch, could be supported in the future.) The Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. -.. - (perhaps some of this should be moved elsewhere) - Introduction - ------------ - - Nova consists of seven main components, with the Cloud Controller component representing the global state and interacting with all other components. 
API Server acts as the Web services front end for the cloud controller. Compute Controller provides compute server resources, and the Object Store component provides storage services. Auth Manager provides authentication and authorization services. Volume Controller provides fast and permanent block-level storage for the comput servers. Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. Scheduler selects the most suitable compute controller to host an instance. - - .. todo:: Insert Figure 1 image from "An OpenStack Network Overview" contributed by Citrix - - Nova is built on a shared-nothing, messaging-based architecture. All of the major components, that is Compute Controller, Volume Controller, Network Controller, and Object Store can be run on multiple servers. Cloud Controller communicates with Object Store via HTTP (Hyper Text Transfer Protocol), but it communicates with Scheduler, Network Controller, and Volume Controller via AMQP (Advanced Message Queue Protocol). To avoid blocking each component while waiting for a response, Nova uses asynchronous calls, with a call-back that gets triggered when a response is received. - - To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.) - - .. note:: The database schema is available on the `OpenStack Wiki _`. - Nova Network Strategies ----------------------- diff --git a/doc/source/adminguide/network.vlan.rst b/doc/source/adminguide/network.vlan.rst index 5bbc54bed53e..c6c4e7f91ea3 100644 --- a/doc/source/adminguide/network.vlan.rst +++ b/doc/source/adminguide/network.vlan.rst @@ -50,7 +50,7 @@ The following diagram illustrates how the communication that occurs between the Goals ----- -* each project is in a protected network segment +For our implementation of Nova, our goal is that each project is in a protected network segment. Here are the specifications we keep in mind for meeting this goal. * RFC-1918 IP space * public IP via NAT @@ -59,19 +59,19 @@ Goals * limited (project-admin controllable) access to other project segments * all connectivity to instance and cloud API is via VPN into the project segment -* common DMZ segment for support services (only visible from project segment) +We also keep as a goal a common DMZ segment for support services, meaning these items are only visible from project segment: * metadata * dashboard - Limitations ----------- +We kept in mind some of these limitations: + * Projects / cluster limited to available VLANs in switching infrastructure * Requires VPN for access to project segment - Implementation -------------- Currently Nova segregates project VLANs using 802.1q VLAN tagging in the diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst index ddf0f1b829bd..d47438de71de 100644 --- a/doc/source/nova.concepts.rst +++ b/doc/source/nova.concepts.rst @@ -23,13 +23,13 @@ Nova Concepts and Introduction Introduction ------------ -Nova is the software that controls your Infrastructure as as Service (IaaS) +Nova, also known as OpenStack Compute, is the software that controls your Infrastructure as as Service (IaaS) cloud computing platform. 
It is similar in scope to Amazon EC2 and Rackspace -CloudServers. Nova does not include any virtualization software, rather it +Cloud Servers. Nova does not include any virtualization software, rather it defines drivers that interact with underlying virtualization mechanisms that run on your host operating system, and exposes functionality over a web API. -This document does not attempt to explain fundamental concepts of cloud +This site does not attempt to explain fundamental concepts of cloud computing, IaaS, virtualization, or other related technologies. Instead, it focuses on describing how Nova's implementation of those concepts is achieved. @@ -64,6 +64,19 @@ Concept: Instances An 'instance' is a word for a virtual machine that runs inside the cloud. +Concept: System Architecture +---------------------------- + +Nova consists of seven main components, with the Cloud Controller component representing the global state and interacting with all other components. API Server acts as the Web services front end for the cloud controller. Compute Controller provides compute server resources, and the Object Store component provides storage services. Auth Manager provides authentication and authorization services. Volume Controller provides fast and permanent block-level storage for the comput servers. Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. Scheduler selects the most suitable compute controller to host an instance. + + .. image:: images/Novadiagram.png + +Nova is built on a shared-nothing, messaging-based architecture. All of the major components, that is Compute Controller, Volume Controller, Network Controller, and Object Store can be run on multiple servers. Cloud Controller communicates with Object Store via HTTP (Hyper Text Transfer Protocol), but it communicates with Scheduler, Network Controller, and Volume Controller via AMQP (Advanced Message Queue Protocol). To avoid blocking each component while waiting for a response, Nova uses asynchronous calls, with a call-back that gets triggered when a response is received. + +To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.) + + .. note:: The database schema is available on the `OpenStack Wiki _`. + Concept: Storage ---------------- @@ -150,7 +163,7 @@ See doc:`nova.manage` in the Administration Guide for more details. Concept: Flags -------------- -python-gflags +Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. Concept: Plugins From a19d0e294efac1fb7e8e3e45a286f6032172da23 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 22 Nov 2010 17:59:49 -0500 Subject: [PATCH 06/83] Rename cloudServersFault (rackspace branding) to computeFault. Fixes bug lp680285. 
---
 nova/api/openstack/faults.py         | 2 +-
 nova/tests/api/openstack/test_api.py | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py
index e69e514390ae..224a7ef0b00c 100644
--- a/nova/api/openstack/faults.py
+++ b/nova/api/openstack/faults.py
@@ -47,7 +47,7 @@ class Fault(webob.exc.HTTPException):
         """Generate a WSGI response based on the exception passed to ctor."""
         # Replace the body with fault details.
         code = self.wrapped_exc.status_int
-        fault_name = self._fault_names.get(code, "cloudServersFault")
+        fault_name = self._fault_names.get(code, "computeFault")
         fault_data = {
             fault_name: {
                 'code': code,
diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py
index dd83991b967f..d8b202e21226 100644
--- a/nova/tests/api/openstack/test_api.py
+++ b/nova/tests/api/openstack/test_api.py
@@ -50,12 +50,12 @@ class APITest(unittest.TestCase):
         api.application = succeed
         resp = Request.blank('/').get_response(api)
-        self.assertFalse('cloudServersFault' in resp.body, resp.body)
+        self.assertFalse('computeFault' in resp.body, resp.body)
         self.assertEqual(resp.status_int, 200, resp.body)

         api.application = raise_webob_exc
         resp = Request.blank('/').get_response(api)
-        self.assertFalse('cloudServersFault' in resp.body, resp.body)
+        self.assertFalse('computeFault' in resp.body, resp.body)
         self.assertEqual(resp.status_int, 404, resp.body)

         api.application = raise_api_fault
@@ -65,10 +65,10 @@
         api.application = fail
         resp = Request.blank('/').get_response(api)
-        self.assertTrue('{"cloudServersFault' in resp.body, resp.body)
+        self.assertTrue('{"computeFault' in resp.body, resp.body)
         self.assertEqual(resp.status_int, 500, resp.body)

         api.application = fail
         resp = Request.blank('/.xml').get_response(api)
-        self.assertTrue('<cloudServersFault' in resp.body, resp.body)
+        self.assertTrue('<computeFault' in resp.body, resp.body)
         self.assertEqual(resp.status_int, 500, resp.body)

From: Anne Gentle
Date: Mon, 22 Nov 2010 17:03:54 -0600
Subject: [PATCH 07/83] Incorporating security groups info

---
 doc/source/devref/cloudpipe.rst |  2 +-
 doc/source/nova.concepts.rst    | 17 +++++++++++++----
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst
index 31bd85e8178f..fb104c160e52 100644
--- a/doc/source/devref/cloudpipe.rst
+++ b/doc/source/devref/cloudpipe.rst
@@ -21,7 +21,7 @@
 Cloudpipe -- Per Project Vpns
 =============================

-Cloudpipe is a method for connecting end users to their project insnances in vlan mode.
+Cloudpipe is a method for connecting end users to their project instances in vlan mode.


 Overview
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index d47438de71de..18368546bdc2 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -117,9 +117,9 @@ Concept: API
 Concept: Networking
 -------------------

-Nova has a concept of Fixed Ips and Floating ips. Fixed ips are assigned to an instance on creation and stay the same until the instance is explicitly terminated. Floating ips are ip addresses that can be dynamically associated with an instance. This address can be disassociated and associated with another instance at any time.
+Nova has a concept of Fixed IPs and Floating IPs. Fixed IPs are assigned to an instance on creation and stay the same until the instance is explicitly terminated. Floating ips are ip addresses that can be dynamically associated with an instance. This address can be disassociated and associated with another instance at any time.
-There are multiple strategies available for implementing fixed ips: +There are multiple strategies available for implementing fixed IPs: Flat Mode ~~~~~~~~~ @@ -129,7 +129,7 @@ The simplest networking mode. Each instance receives a fixed ip from the pool. Flat DHCP Mode ~~~~~~~~~~~~~~ -This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed ips by doing a dhcpdiscover. +This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover. VLAN DHCP Mode ~~~~~~~~~~~~~~ @@ -200,8 +200,17 @@ Concept: Scheduler Concept: Security Groups ------------------------ -Security groups +In Nova, a security group is a named collection of network access rules, like firewall policies. These access rules specify which incoming network traffic should be delivered to all VM instances in the group, all other incoming traffic being discarded. Users can modify rules for a group at any time. The new rules are automatically enforced for all running instances and instances launched from then on. +When launching VM instances, the project manager specifies which security groups it wants to join. It will become a member of these specified security groups when it is launched. If no groups are specified, the instances is assigned to the default group, which by default allows all network traffic from other members of this group and discards traffic from other IP addresses and groups. If this does not meet a user's needs, the user can modify the rule settings of the default group. + +A security group can be thought of as a security profile or a security role - it promotes the good practice of managing firewalls by role, not by machine. For example, a user could stipulate that servers with the "webapp" role must be able to connect to servers with the "mysql" role on port 3306. Going further with the security profile analogy, an instance can be launched with membership of multiple security groups - similar to a server with multiple roles. Because all rules in security groups are ACCEPT rules, it's trivial to combine them. + +Each rule in a security group must specify the source of packets to be allowed, which can either be a subnet anywhere on the Internet (in CIDR notation, with 0.0.0./0 representing the entire Internet) or another security group. In the latter case, the source security group can be any user's group. This makes it easy to grant selective access to one user's instances from instances run by the user's friends, partners, and vendors. + +The creation of rules with other security groups specified as sources helps users deal with dynamic IP addressing. Without this feature, the user would have had to adjust the security groups each time a new instance is launched. This practice would become cumbersome if an application running in Nova is very dynamic and elastic, for example scales up or down frequently. + +Security groups for a VM are passed at launch time by the cloud controller to the compute node, and applied at the compute node when a VM is started. 
Concept: Certificate Authority ------------------------------ From 981927385193b95f532dbf03c3e350f65c1b9005 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 23 Nov 2010 11:45:56 +0100 Subject: [PATCH 08/83] Unify the location of the default flagfile. Not all workers called utils.default_flagfile, and nova-manage explicitly said to use the one in /etc/nova/nova-manage.conf. This made development awkward since everything but nova-manage would use defaults for everything, but nova-manage would use whatever config was in /etc/nova/nova-manage.conf which was likely put there by a package of some sort. --- bin/nova-compute | 2 ++ bin/nova-instancemonitor | 2 ++ bin/nova-manage | 2 +- bin/nova-network | 2 ++ bin/nova-scheduler | 2 ++ bin/nova-volume | 2 ++ 6 files changed, 11 insertions(+), 1 deletion(-) diff --git a/bin/nova-compute b/bin/nova-compute index 1724e96594d5..a66477af536f 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -34,10 +34,12 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): from nova import service from nova import twistd +from nova import utils if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': + utils.default_flagfile() application = service.Service.create() # pylint: disable=C0103 diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor index 094da403344f..a7b7fb0c68e7 100755 --- a/bin/nova-instancemonitor +++ b/bin/nova-instancemonitor @@ -34,6 +34,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) +from nova import utils from nova import twistd from nova.compute import monitor @@ -44,6 +45,7 @@ if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': + utils.default_flagfile() logging.warn('Starting instance monitor') # pylint: disable-msg=C0103 monitor = monitor.InstanceMonitor() diff --git a/bin/nova-manage b/bin/nova-manage index 08b3da1231a6..eb7c6b87b533 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -467,7 +467,7 @@ def methods_of(obj): def main(): """Parse options and call the appropriate class/method.""" - utils.default_flagfile('/etc/nova/nova-manage.conf') + utils.default_flagfile() argv = FLAGS(sys.argv) if FLAGS.verbose: diff --git a/bin/nova-network b/bin/nova-network index fa88aeb47612..342a63058cc3 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -34,10 +34,12 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): from nova import service from nova import twistd +from nova import utils if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': + utils.default_flagfile() application = service.Service.create() # pylint: disable-msg=C0103 diff --git a/bin/nova-scheduler b/bin/nova-scheduler index 38a8f213fedf..069b5a6fa950 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -34,10 +34,12 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): from nova import service from nova import twistd +from nova import utils if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': + utils.default_flagfile() application = service.Service.create() diff --git a/bin/nova-volume b/bin/nova-volume index b9e235717312..26148b0ecc2b 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -34,10 +34,12 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): from nova import service from nova import twistd +from nova 
import utils if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': + utils.default_flagfile() application = service.Service.create() # pylint: disable-msg=C0103 From 693624831066af08dcf488d1528b017048fbde71 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Nov 2010 17:56:43 +0000 Subject: [PATCH 09/83] changed bridge_dev to vlan_interface --- nova/network/linux_net.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 68037ed9a19e..d39ed9f86746 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -42,8 +42,8 @@ flags.DEFINE_string('networks_path', utils.abspath('../networks'), 'Location to keep network config files') flags.DEFINE_string('public_interface', 'vlan1', 'Interface for public IP addresses') -flags.DEFINE_string('bridge_dev', None, - 'network device for bridges') +flags.DEFINE_string('vlan_interface', 'eth0', + 'network device for vlans') flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'), 'location of nova-dhcpbridge') flags.DEFINE_string('routing_source_ip', '127.0.0.1', @@ -134,7 +134,7 @@ def ensure_vlan(vlan_num): if not _device_exists(interface): logging.debug("Starting VLAN inteface %s", interface) _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") - _execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, vlan_num)) + _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num)) _execute("sudo ifconfig %s up" % interface) return interface From 6811f824f7c1edd1b3882621d80fba54a2bf019d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Nov 2010 17:57:12 +0000 Subject: [PATCH 10/83] added flat_interface for flat_dhcp binding --- nova/network/manager.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 96f8cf50b0b5..8f13a8230003 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -61,6 +61,8 @@ from nova import utils FLAGS = flags.FLAGS flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') +flags.DEFINE_string('flat_interface', None, + 'flat_dhcp will bridge into this interface if set') flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2', @@ -319,7 +321,7 @@ class FlatDHCPManager(FlatManager): """Sets up matching network for compute hosts.""" network_ref = db.network_get_by_instance(context, instance_id) self.driver.ensure_bridge(network_ref['bridge'], - FLAGS.bridge_dev, + FLAGS.flat_interface, network_ref) def setup_fixed_ip(self, context, address): @@ -338,7 +340,7 @@ class FlatDHCPManager(FlatManager): self.db.network_update(context, network_id, net) network_ref = db.network_get(context, network_id) self.driver.ensure_bridge(network_ref['bridge'], - FLAGS.bridge_dev, + FLAGS.flat_interface, network_ref) From 015b7e5848c010ab86f067fb5dff462c2f34f4f9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Nov 2010 17:57:25 +0000 Subject: [PATCH 11/83] updated nova.sh --- contrib/nova.sh | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/contrib/nova.sh b/contrib/nova.sh index abb8c7de0203..8cbd5f605f80 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -1,11 +1,11 @@ #!/usr/bin/env bash DIR=`pwd` CMD=$1 -SOURCE_BRANCH=lp:nova +SOURCE_BRANCH=lp:~anso/nova/deploy if [ -n "$2" ]; then SOURCE_BRANCH=$2 fi -DIRNAME=nova 
+DIRNAME=deploy NOVA_DIR=$DIR/$DIRNAME if [ -n "$3" ]; then NOVA_DIR=$DIR/$3 @@ -17,11 +17,10 @@ if [ ! -n "$HOST_IP" ]; then # you should explicitly set HOST_IP in your environment HOST_IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` fi - USE_MYSQL=${USE_MYSQL:-0} MYSQL_PASS=${MYSQL_PASS:-nova} TEST=${TEST:-0} -USE_LDAP=${USE_LDAP:-0} +USE_LDAP=${USE_LDAP:-1} LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu} NET_MAN=${NET_MAN:-VlanManager} # NOTE(vish): If you are using FlatDHCP make sure that this is not your @@ -48,14 +47,15 @@ cat >/etc/nova/nova-manage.conf << NOVA_CONF_EOF --FAKE_subdomain=ec2 --network_manager=nova.network.manager.$NET_MAN --cc_host=$HOST_IP +--cc_dmz=$HOST_IP --routing_source_ip=$HOST_IP --sql_connection=$SQL_CONN --auth_driver=nova.auth.$AUTH --libvirt_type=$LIBVIRT_TYPE NOVA_CONF_EOF -if [ -n "$BRIDGE_DEV" ]; then - echo "--bridge_dev=$BRIDGE_DEV" >>/etc/nova/nova-manage.conf +if [ -n "$FLAT_INTERFACE" ]; then + echo "--flat_interface=$FLAT_INTERFACE" >>/etc/nova/nova-manage.conf fi if [ "$CMD" == "branch" ]; then @@ -72,9 +72,10 @@ if [ "$CMD" == "install" ]; then sudo apt-get install -y python-software-properties sudo add-apt-repository ppa:nova-core/ppa sudo apt-get update - sudo apt-get install -y dnsmasq open-iscsi kpartx kvm gawk iptables ebtables + sudo apt-get install -y dnsmasq kpartx kvm gawk iptables ebtables sudo apt-get install -y user-mode-linux kvm libvirt-bin - sudo apt-get install -y screen iscsitarget euca2ools vlan curl rabbitmq-server + sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server + sudo apt-get install -y lvm2 iscsitarget open-iscsi echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget sudo /etc/init.d/iscsitarget restart sudo modprobe kvm @@ -119,6 +120,9 @@ if [ "$CMD" == "run" ]; then rm -rf $NOVA_DIR/networks mkdir -p $NOVA_DIR/networks $NOVA_DIR/tools/clean-vlans + sleep 3 + ifdown eth0 + ifup eth0 if [ ! -d "$NOVA_DIR/images" ]; then ln -s $DIR/images $NOVA_DIR/images fi @@ -147,6 +151,11 @@ if [ "$CMD" == "run" ]; then screen_it scheduler "$NOVA_DIR/bin/nova-scheduler --flagfile=/etc/nova/nova-manage.conf" screen_it volume "$NOVA_DIR/bin/nova-volume --flagfile=/etc/nova/nova-manage.conf" screen_it test ". $NOVA_DIR/novarc" + + sleep 3 + + $NOVA_DIR/bin/nova-manage service enable `hostname` nova-compute + $NOVA_DIR/bin/nova-manage service enable `hostname` nova-volume screen -S nova -x fi @@ -154,12 +163,13 @@ if [ "$CMD" == "run" ] || [ "$CMD" == "terminate" ]; then # shutdown instances . $NOVA_DIR/novarc; euca-describe-instances | grep i- | cut -f2 | xargs euca-terminate-instances sleep 2 + # delete volumes + . 
$NOVA_DIR/novarc; euca-describe-volumes | grep vol- | cut -f2 | xargs -n1 euca-delete-volume fi if [ "$CMD" == "run" ] || [ "$CMD" == "clean" ]; then screen -S nova -X quit rm *.pid* - $NOVA_DIR/tools/setup_iptables.sh clear fi if [ "$CMD" == "scrub" ]; then @@ -169,5 +179,4 @@ if [ "$CMD" == "scrub" ]; then else virsh list | grep i- | awk '{print \$1}' | xargs -n1 virsh destroy fi - vblade-persist ls | grep vol- | awk '{print \$1\" \"\$2}' | xargs -n2 vblade-persist destroy fi From 84521218b84d2eed307364c9efc9f6f2ee212aac Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Nov 2010 23:18:02 +0000 Subject: [PATCH 12/83] docstrings, more flags, breakout of metadata forwarding --- nova/network/linux_net.py | 10 +++--- nova/network/manager.py | 67 +++++++++++++++++++++++++++++++++++---- 2 files changed, 65 insertions(+), 12 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index d39ed9f86746..b30ddb6671e9 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -53,15 +53,15 @@ flags.DEFINE_bool('use_nova_chains', False, DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] - -def init_host(): - """Basic networking setup goes here""" - # NOTE(devcamcar): Cloud public DNAT entries, CloudPipe port - # forwarding entries and a default DNAT entry. +def metadata_forward(): + """Create forwarding rule for metadata""" _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 " "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT " "--to-destination %s:%s" % (FLAGS.cc_host, FLAGS.cc_port)) + +def init_host(): + """Basic networking setup goes here""" # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for outbound traffic. _confirm_rule("POSTROUTING", "-t nat -s %s " diff --git a/nova/network/manager.py b/nova/network/manager.py index 8f13a8230003..f9489d2ade1b 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -27,6 +27,7 @@ topologies. 
All of the network commands are issued to a subclass of :network_driver: Driver to use for network creation :flat_network_bridge: Bridge device for simple network instances +:flat_interface: FlatDhcp will bridge into this interface if set :flat_network_dns: Dns for simple network :flat_network_dhcp_start: Dhcp start for FlatDhcp :vlan_start: First VLAN for private networks @@ -61,10 +62,12 @@ from nova import utils FLAGS = flags.FLAGS flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') -flags.DEFINE_string('flat_interface', None, - 'flat_dhcp will bridge into this interface if set') flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') +flags.DEFINE_bool('flat_injected', True, + 'Whether to attempt to inject network setup into guest') +flags.DEFINE_string('flat_interface', None, + 'FlatDhcp will bridge into this interface if set') flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2', 'Dhcp start for FlatDhcp') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') @@ -177,9 +180,11 @@ class NetworkManager(manager.Manager): if instance_ref['mac_address'] != mac: raise exception.Error("IP %s leased to bad mac %s vs %s" % (address, instance_ref['mac_address'], mac)) + now = datetime.datetime.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], - {'leased': True}) + {'leased': True, + 'updated_at': now}) if not fixed_ip_ref['allocated']: logging.warn("IP %s leased that was already deallocated", address) @@ -248,7 +253,31 @@ class NetworkManager(manager.Manager): class FlatManager(NetworkManager): - """Basic network where no vlans are used.""" + """Basic network where no vlans are used. + + FlatManager does not do any bridge or vlan creation. The user is + responsible for setting up whatever bridge is specified in + flat_network_bridge (br100 by default). This bridge needs to be created + on all compute hosts. + + The idea is to create a single network for the host with a command like: + nova-manage network create 192.168.0.0/24 256 1. Creating multiple + networks for for one manager is currently not supported, but could be + added by modifying allocate_fixed_ip and get_network to get the a network + with new logic instead of network_get_by_bridge. Arbitrary lists of + addresses in a single network can be accomplished with manual db editing. + + If flat_injected is True, the compute host will attempt to inject network + config into the guest. It attempts to modify /etc/network/interfaces and + currently only works on debian based systems. To support a wider range of + OSes, some other method may need to be devised to let the guest know which + ip it should be using so that it can configure itself. Perhaps an attached + disk or serial device with configuration info. + + Metadata forwarding must be handled by the gateway, and since nova does + not do any setup in this mode, it must be done manually. Requests to + 169.254.169.254 port 80 will need to be forwarded to the api server. 
+ """ def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): """Gets a fixed ip from the pool.""" @@ -309,13 +338,25 @@ class FlatManager(NetworkManager): def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a network.""" net = {} - net['injected'] = True + net['injected'] = FLAGS.flat_injected net['dns'] = FLAGS.flat_network_dns self.db.network_update(context, network_id, net) class FlatDHCPManager(FlatManager): - """Flat networking with dhcp.""" + """Flat networking with dhcp. + + FlatDHCPManager will start up one dhcp server to give out addresses. + It never injects network settings into the guest. Otherwise it behaves + like FlatDHCPManager. + """ + + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. + """ + super(FlatDHCPManager, self).init_host() + self.driver.metadata_forward() def setup_compute_network(self, context, instance_id): """Sets up matching network for compute hosts.""" @@ -345,7 +386,18 @@ class FlatDHCPManager(FlatManager): class VlanManager(NetworkManager): - """Vlan network with dhcp.""" + """Vlan network with dhcp. + + VlanManager is the most complicated. It will create a host-managed + vlan for each project. Each project gets its own subnet. The networks + and associated subnets are created with nova-manage using a command like: + nova-manage network create 10.0.0.0/8 16 3. This will create 3 networks + of 16 addresses from the beginning of the 10.0.0.0 range. + + A dhcp server is run for each subnet, so each project will have its own. + For this mode to be useful, each project will need a vpn to access the + instances in its subnet. + """ @defer.inlineCallbacks def periodic_tasks(self, context=None): @@ -365,6 +417,7 @@ class VlanManager(NetworkManager): standalone service. """ super(VlanManager, self).init_host() + self.driver.metadata_forward() self.driver.init_host() def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): From 521dd52e49feeae04108f3e21480f42456b4e4c7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Nov 2010 23:56:26 +0000 Subject: [PATCH 13/83] fix typos in docstring --- nova/network/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index f9489d2ade1b..a7298b47f0c2 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -261,7 +261,7 @@ class FlatManager(NetworkManager): on all compute hosts. The idea is to create a single network for the host with a command like: - nova-manage network create 192.168.0.0/24 256 1. Creating multiple + nova-manage network create 192.168.0.0/24 1 256. Creating multiple networks for for one manager is currently not supported, but could be added by modifying allocate_fixed_ip and get_network to get the a network with new logic instead of network_get_by_bridge. Arbitrary lists of @@ -391,7 +391,7 @@ class VlanManager(NetworkManager): VlanManager is the most complicated. It will create a host-managed vlan for each project. Each project gets its own subnet. The networks and associated subnets are created with nova-manage using a command like: - nova-manage network create 10.0.0.0/8 16 3. This will create 3 networks + nova-manage network create 10.0.0.0/8 3 16. This will create 3 networks of 16 addresses from the beginning of the 10.0.0.0 range. A dhcp server is run for each subnet, so each project will have its own. 
From d62d3f7bcf06802662f77f8013c9da99eccec0a7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 24 Nov 2010 00:16:47 +0000 Subject: [PATCH 14/83] pep8 --- nova/network/linux_net.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 7dbf6b733dc7..391abfb76975 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -53,6 +53,7 @@ flags.DEFINE_bool('use_nova_chains', False, DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] + def metadata_forward(): """Create forwarding rule for metadata""" _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 " From 309c8b8ff8732e8d80c445381aee7e1f9852def6 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 24 Nov 2010 22:10:21 +0000 Subject: [PATCH 15/83] Adding support for modification only of user accounts. --- nova/auth/ldapdriver.py | 110 ++++++++++++++++++++++++++++++---------- 1 file changed, 84 insertions(+), 26 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index ceade1d65cf4..d1ef37cf0c5e 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -40,6 +40,8 @@ flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') +flags.DEFINE_boolean('ldap_user_modify_only', False, + 'Modify attributes for users instead of creating/deleting') flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com', 'OU for Projects') flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com', @@ -89,8 +91,7 @@ class LdapDriver(object): def get_user(self, uid): """Retrieve user by id""" - attr = self.__find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') + attr = self.__get_ldap_user(uid) return self.__to_user(attr) def get_user_from_access_key(self, access): @@ -110,7 +111,12 @@ class LdapDriver(object): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, '(objectclass=novaUser)') - return [self.__to_user(attr) for attr in attrs] + users = [] + for attr in attrs: + user = self.__to_user(attr) + if user != None: + users.append(user) + return users def get_projects(self, uid=None): """Retrieve list of projects""" @@ -125,21 +131,46 @@ class LdapDriver(object): """Create a user""" if self.__user_exists(name): raise exception.Duplicate("LDAP user %s already exists" % name) - attr = [ - ('objectclass', ['person', - 'organizationalPerson', - 'inetOrgPerson', - 'novaUser']), - ('ou', [FLAGS.ldap_user_unit]), - ('uid', [name]), - ('sn', [name]), - ('cn', [name]), - ('secretKey', [secret_key]), - ('accessKey', [access_key]), - ('isAdmin', [str(is_admin).upper()]), - ] - self.conn.add_s(self.__uid_to_dn(name), attr) - return self.__to_user(dict(attr)) + if FLAGS.ldap_user_modify_only: + if self.__ldap_user_exists(name): + # Retrieve user by name + user = self.__get_ldap_user(name) + if user.has_key('accessKey') and user.has_key('secretKey') and user.has_key('isAdmin'): + raise exception.Duplicate("LDAP user %s already exists" % name) + else: + # Entry could be malformed, test for missing attrs. + # Malformed entries are useless, replace attributes found. 
+ attr = [] + if user.has_key('secretKey'): + attr.append((self.ldap.MOD_REPLACE, 'secretKey', [secret_key])) + else: + attr.append((self.ldap.MOD_ADD, 'secretKey', [secret_key])) + if user.has_key('accessKey'): + attr.append((self.ldap.MOD_REPLACE, 'accessKey', [access_key])) + else: + attr.append((self.ldap.MOD_ADD, 'accessKey', [access_key])) + if user.has_key('isAdmin'): + attr.append((self.ldap.MOD_REPLACE, 'isAdmin', [str(is_admin).upper()])) + else: + attr.append((self.ldap.MOD_ADD, 'isAdmin', [str(is_admin).upper()])) + self.conn.modify_s(self.__uid_to_dn(name), attr) + return self.get_user(name) + else: + attr = [ + ('objectclass', ['person', + 'organizationalPerson', + 'inetOrgPerson', + 'novaUser']), + ('ou', [FLAGS.ldap_user_unit]), + ('uid', [name]), + ('sn', [name]), + ('cn', [name]), + ('secretKey', [secret_key]), + ('accessKey', [access_key]), + ('isAdmin', [str(is_admin).upper()]), + ] + self.conn.add_s(self.__uid_to_dn(name), attr) + return self.__to_user(dict(attr)) def create_project(self, name, manager_uid, description=None, member_uids=None): @@ -256,7 +287,21 @@ class LdapDriver(object): if not self.__user_exists(uid): raise exception.NotFound("User %s doesn't exist" % uid) self.__remove_from_all(uid) - self.conn.delete_s(self.__uid_to_dn(uid)) + if FLAGS.ldap_user_modify_only: + # Delete attributes + attr = [] + # Retrieve user by name + user = self.__get_ldap_user(uid) + if user.has_key('secretKey'): + attr.append((self.ldap.MOD_DELETE, 'secretKey', user['secretKey'])) + if user.has_key('accessKey'): + attr.append((self.ldap.MOD_DELETE, 'accessKey', user['accessKey'])) + if user.has_key('isAdmin'): + attr.append((self.ldap.MOD_DELETE, 'isAdmin', user['isAdmin'])) + self.conn.modify_s(self.__uid_to_dn(uid), attr) + else: + # Delete entry + self.conn.delete_s(self.__uid_to_dn(uid)) def delete_project(self, project_id): """Delete a project""" @@ -265,7 +310,7 @@ class LdapDriver(object): self.__delete_group(project_dn) def modify_user(self, uid, access_key=None, secret_key=None, admin=None): - """Modify an existing project""" + """Modify an existing user""" if not access_key and not secret_key and admin is None: return attr = [] @@ -281,10 +326,20 @@ class LdapDriver(object): """Check if user exists""" return self.get_user(uid) != None + def __ldap_user_exists(self, uid): + """Check if the user exists in ldap""" + return self.__get_ldap_user(uid) != None + def __project_exists(self, project_id): """Check if project exists""" return self.get_project(project_id) != None + def __get_ldap_user(self, uid): + """Retrieve LDAP user entry by id""" + attr = self.__find_object(self.__uid_to_dn(uid), + '(objectclass=novaUser)') + return attr + def __find_object(self, dn, query=None, scope=None): """Find an object by dn and query""" objects = self.__find_objects(dn, query, scope) @@ -449,12 +504,15 @@ class LdapDriver(object): """Convert ldap attributes to User object""" if attr == None: return None - return { - 'id': attr['uid'][0], - 'name': attr['cn'][0], - 'access': attr['accessKey'][0], - 'secret': attr['secretKey'][0], - 'admin': (attr['isAdmin'][0] == 'TRUE')} + if (attr.has_key('accessKey') and attr.has_key('secretKey') and attr.has_key('isAdmin')): + return { + 'id': attr['uid'][0], + 'name': attr['uid'][0], + 'access': attr['accessKey'][0], + 'secret': attr['secretKey'][0], + 'admin': (attr['isAdmin'][0] == 'TRUE')} + else: + return None def __to_project(self, attr): """Convert ldap attributes to Project object""" From d7515bcb1d35e2e558a01c381b1d3a22165daa4b Mon Sep 17 
00:00:00 2001 From: Ryan Lane Date: Wed, 24 Nov 2010 22:34:52 +0000 Subject: [PATCH 16/83] Setting "name" back to "cn", since id and name should be separate --- nova/auth/ldapdriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index d1ef37cf0c5e..95519d000576 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -507,7 +507,7 @@ class LdapDriver(object): if (attr.has_key('accessKey') and attr.has_key('secretKey') and attr.has_key('isAdmin')): return { 'id': attr['uid'][0], - 'name': attr['uid'][0], + 'name': attr['cn'][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], 'admin': (attr['isAdmin'][0] == 'TRUE')} From 1188dd95fbfef144ca71a3c9df2f7dbdb665c97f Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 24 Nov 2010 14:52:10 -0800 Subject: [PATCH 17/83] Consolidated the start instance logic in the two API classes into a single method. This also cleans up a number of small discrepencies between the two. --- nova/api/ec2/cloud.py | 174 ++++------------------- nova/api/openstack/servers.py | 96 ++----------- nova/compute/instance_types.py | 20 +++ nova/compute/manager.py | 130 +++++++++++++++++ nova/quota.py | 5 + nova/tests/api/openstack/fakes.py | 2 +- nova/tests/api/openstack/test_servers.py | 6 + nova/tests/quota_unittest.py | 16 ++- 8 files changed, 211 insertions(+), 238 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9327bf0d44d5..c694579674fb 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -39,7 +39,7 @@ from nova import flags from nova import quota from nova import rpc from nova import utils -from nova.compute.instance_types import INSTANCE_TYPES +from nova.compute import instance_types from nova.api import cloud from nova.image.s3 import S3ImageService @@ -50,11 +50,6 @@ flags.DECLARE('storage_availability_zone', 'nova.volume.manager') InvalidInputException = exception.InvalidInputException -class QuotaError(exception.ApiError): - """Quota Exceeeded""" - pass - - def _gen_key(context, user_id, key_name): """Generate a key @@ -127,7 +122,7 @@ class CloudController(object): for instance in db.instance_get_all_by_project(context, project_id): if instance['fixed_ip']: line = '%s slots=%d' % (instance['fixed_ip']['address'], - INSTANCE_TYPES[instance['instance_type']]['vcpus']) + instance['vcpus']) key = str(instance['key_name']) if key in result: result[key].append(line) @@ -260,7 +255,7 @@ class CloudController(object): return True def describe_security_groups(self, context, group_name=None, **kwargs): - self._ensure_default_security_group(context) + self.compute_manager.ensure_default_security_group(context) if context.user.is_admin(): groups = db.security_group_get_all(context) else: @@ -358,7 +353,7 @@ class CloudController(object): return False def revoke_security_group_ingress(self, context, group_name, **kwargs): - self._ensure_default_security_group(context) + self.compute_manager.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) @@ -383,7 +378,7 @@ class CloudController(object): # for these operations, so support for newer API versions # is sketchy. 
def authorize_security_group_ingress(self, context, group_name, **kwargs): - self._ensure_default_security_group(context) + self.compute_manager.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) @@ -419,7 +414,7 @@ class CloudController(object): return source_project_id def create_security_group(self, context, group_name, group_description): - self._ensure_default_security_group(context) + self.compute_manager.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): raise exception.ApiError('group %s already exists' % group_name) @@ -505,9 +500,8 @@ class CloudController(object): if quota.allowed_volumes(context, 1, size) < 1: logging.warn("Quota exceeeded for %s, tried to create %sG volume", context.project_id, size) - raise QuotaError("Volume quota exceeded. You cannot " - "create a volume of size %s" % - size) + raise quota.QuotaError("Volume quota exceeded. You cannot " + "create a volume of size %s" % size) vol = {} vol['size'] = size vol['user_id'] = context.user.id @@ -699,8 +693,8 @@ class CloudController(object): if quota.allowed_floating_ips(context, 1) < 1: logging.warn("Quota exceeeded for %s, tried to allocate address", context.project_id) - raise QuotaError("Address quota exceeded. You cannot " - "allocate any more addresses") + raise quota.QuotaError("Address quota exceeded. You cannot " + "allocate any more addresses") network_topic = self._get_network_topic(context) public_ip = rpc.call(context, network_topic, @@ -752,137 +746,25 @@ class CloudController(object): "args": {"network_id": network_ref['id']}}) return db.queue_get_for(context, FLAGS.network_topic, host) - def _ensure_default_security_group(self, context): - try: - db.security_group_get_by_name(context, - context.project_id, - 'default') - except exception.NotFound: - values = {'name': 'default', - 'description': 'default', - 'user_id': context.user.id, - 'project_id': context.project_id} - group = db.security_group_create(context, values) - def run_instances(self, context, **kwargs): - instance_type = kwargs.get('instance_type', 'm1.small') - if instance_type not in INSTANCE_TYPES: - raise exception.ApiError("Unknown instance type: %s", - instance_type) - # check quota - max_instances = int(kwargs.get('max_count', 1)) - min_instances = int(kwargs.get('min_count', max_instances)) - num_instances = quota.allowed_instances(context, - max_instances, - instance_type) - if num_instances < min_instances: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_instances) - raise QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% - num_instances, "InstanceLimitExceeded") - # make sure user can access the image - # vpn image is private so it doesn't show up on lists - vpn = kwargs['image_id'] == FLAGS.vpn_image_id - - if not vpn: - image = self.image_service.show(context, kwargs['image_id']) - - # FIXME(ja): if image is vpn, this breaks - # get defaults from imagestore - image_id = image['imageId'] - kernel_id = image.get('kernelId', FLAGS.default_kernel) - ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) - - # API parameters overrides of defaults - kernel_id = kwargs.get('kernel_id', kernel_id) - ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) - - # make sure we have access to kernel and ramdisk - self.image_service.show(context, kernel_id) - self.image_service.show(context, ramdisk_id) - - logging.debug("Going to run %s instances...", num_instances) - launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - key_data = None - if 'key_name' in kwargs: - key_pair_ref = db.key_pair_get(context, - context.user.id, - kwargs['key_name']) - key_data = key_pair_ref['public_key'] - - security_group_arg = kwargs.get('security_group', ["default"]) - if not type(security_group_arg) is list: - security_group_arg = [security_group_arg] - - security_groups = [] - self._ensure_default_security_group(context) - for security_group_name in security_group_arg: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) - - reservation_id = utils.generate_uid('r') - base_options = {} - base_options['state_description'] = 'scheduling' - base_options['image_id'] = image_id - base_options['kernel_id'] = kernel_id - base_options['ramdisk_id'] = ramdisk_id - base_options['reservation_id'] = reservation_id - base_options['key_data'] = key_data - base_options['key_name'] = kwargs.get('key_name', None) - base_options['user_id'] = context.user.id - base_options['project_id'] = context.project_id - base_options['user_data'] = kwargs.get('user_data', '') - - base_options['display_name'] = kwargs.get('display_name') - base_options['display_description'] = kwargs.get('display_description') - - type_data = INSTANCE_TYPES[instance_type] - base_options['instance_type'] = instance_type - base_options['memory_mb'] = type_data['memory_mb'] - base_options['vcpus'] = type_data['vcpus'] - base_options['local_gb'] = type_data['local_gb'] - elevated = context.elevated() - - for num in range(num_instances): - - instance_ref = self.compute_manager.create_instance(context, - security_groups, - mac_address=utils.generate_mac(), - launch_index=num, - **base_options) - inst_id = instance_ref['id'] - - internal_id = instance_ref['internal_id'] - ec2_id = internal_id_to_ec2_id(internal_id) - - self.compute_manager.update_instance(context, - inst_id, - hostname=ec2_id) - - # TODO(vish): This probably should be done in the scheduler - # or in compute as a call. The network should be - # allocated after the host is assigned and setup - # can happen at the same time. 
- address = self.network_manager.allocate_fixed_ip(context, - inst_id, - vpn) - network_topic = self._get_network_topic(context) - rpc.cast(elevated, - network_topic, - {"method": "setup_fixed_ip", - "args": {"address": address}}) - - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "run_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": inst_id}}) - logging.debug("Casting to scheduler for %s/%s's instance %s" % - (context.project.name, context.user.name, inst_id)) - return self._format_run_instances(context, reservation_id) + max_count = int(kwargs.get('max_count', 1)) + instances = self.compute_manager.create_instances(context, + instance_types.get_by_type(kwargs.get('instance_type', None)), + self.image_service, + kwargs['image_id'], + self._get_network_topic(context), + min_count=int(kwargs.get('min_count', max_count)), + max_count=max_count, + kernel_id=kwargs.get('kernel_id'), + ramdisk_id=kwargs.get('ramdisk_id'), + name=kwargs.get('display_name'), + description=kwargs.get('display_description'), + user_data=kwargs.get('user_data', ''), + key_name=kwargs.get('key_name'), + security_group=kwargs.get('security_group'), + generate_hostname=internal_id_to_ec2_id) + return self._format_run_instances(context, + instances[0]['reservation_id']) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 1d8aa2fa4dab..e1e2bf7fd441 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -140,22 +140,23 @@ class Controller(wsgi.Controller): def create(self, req): """ Creates a new server for a given user """ - env = self._deserialize(req.body, req) if not env: return faults.Fault(exc.HTTPUnprocessableEntity()) - #try: - inst = self._build_server_instance(req, env) - #except Exception, e: - # return faults.Fault(exc.HTTPUnprocessableEntity()) - user_id = req.environ['nova.context']['user']['id'] - rpc.cast(context.RequestContext(user_id, user_id), - FLAGS.compute_topic, - {"method": "run_instance", - "args": {"instance_id": inst['id']}}) - return _entity_inst(inst) + ctxt = context.RequestContext(user_id, user_id) + key_pair = self.db_driver.key_pair_get_all_by_user(None, user_id)[0] + instances = self.compute_manager.create_instances(ctxt, + instance_types.get_by_flavor_id(env['server']['flavorId']), + utils.import_object(FLAGS.image_service), + env['server']['imageId'], + self._get_network_topic(ctxt), + name=env['server']['name'], + description=env['server']['name'], + key_name=key_pair['name'], + key_data=key_pair['public_key']) + return _entity_inst(instances[0]) def update(self, req, id): """ Updates the server name or password """ @@ -191,79 +192,6 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) cloud.reboot(id) - def _build_server_instance(self, req, env): - """Build instance data structure and save it to the data store.""" - ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - inst = {} - - user_id = req.environ['nova.context']['user']['id'] - ctxt = context.RequestContext(user_id, user_id) - - flavor_id = env['server']['flavorId'] - - instance_type, flavor = [(k, v) for k, v in - instance_types.INSTANCE_TYPES.iteritems() - if v['flavorid'] == flavor_id][0] - - image_id = env['server']['imageId'] - img_service = utils.import_object(FLAGS.image_service) - - image = img_service.show(image_id) - - if not image: - raise Exception("Image 
not found") - - inst['server_name'] = env['server']['name'] - inst['image_id'] = image_id - inst['user_id'] = user_id - inst['launch_time'] = ltime - inst['mac_address'] = utils.generate_mac() - inst['project_id'] = user_id - - inst['state_description'] = 'scheduling' - inst['kernel_id'] = image.get('kernelId', FLAGS.default_kernel) - inst['ramdisk_id'] = image.get('ramdiskId', FLAGS.default_ramdisk) - inst['reservation_id'] = utils.generate_uid('r') - - inst['display_name'] = env['server']['name'] - inst['display_description'] = env['server']['name'] - - #TODO(dietz) this may be ill advised - key_pair_ref = self.db_driver.key_pair_get_all_by_user( - None, user_id)[0] - - inst['key_data'] = key_pair_ref['public_key'] - inst['key_name'] = key_pair_ref['name'] - - #TODO(dietz) stolen from ec2 api, see TODO there - inst['security_group'] = 'default' - - # Flavor related attributes - inst['instance_type'] = instance_type - inst['memory_mb'] = flavor['memory_mb'] - inst['vcpus'] = flavor['vcpus'] - inst['local_gb'] = flavor['local_gb'] - inst['mac_address'] = utils.generate_mac() - inst['launch_index'] = 0 - - ref = self.compute_manager.create_instance(ctxt, **inst) - inst['id'] = ref['internal_id'] - - inst['hostname'] = str(ref['internal_id']) - self.compute_manager.update_instance(ctxt, inst['id'], **inst) - - address = self.network_manager.allocate_fixed_ip(ctxt, - inst['id']) - - # TODO(vish): This probably should be done in the scheduler - # network is setup when host is assigned - network_topic = self._get_network_topic(ctxt) - rpc.call(ctxt, - network_topic, - {"method": "setup_fixed_ip", - "args": {"address": address}}) - return inst - def _get_network_topic(self, context): """Retrieves the network host for a project""" network_ref = self.network_manager.get_network(context) diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 67ee8f8a8aef..a2679e0fc2e6 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -21,9 +21,29 @@ The built-in instance properties. """ +from nova import flags + +FLAGS = flags.FLAGS INSTANCE_TYPES = { 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1), 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2), 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3), 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4), 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)} + + +def get_by_type(instance_type): + """Build instance data structure and save it to the data store.""" + if instance_type is None: + return FLAGS.default_instance_type + if instance_type not in INSTANCE_TYPES: + raise exception.ApiError("Unknown instance type: %s", + instance_type) + return instance_type + + +def get_by_flavor_id(flavor_id): + for instance_type, details in INSTANCE_TYPES.iteritems(): + if details['flavorid'] == flavor_id: + return instance_type + return FLAGS.default_instance_type diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 890d79fba92e..cfc3d4bbd960 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -36,13 +36,18 @@ termination. 
import datetime import logging +import time from twisted.internet import defer +from nova import db from nova import exception from nova import flags from nova import manager +from nova import quota +from nova import rpc from nova import utils +from nova.compute import instance_types from nova.compute import power_state @@ -53,6 +58,11 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for volume creation') +def generate_default_hostname(internal_id): + """Default function to generate a hostname given an instance reference.""" + return str(internal_id) + + class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" @@ -84,6 +94,126 @@ class ComputeManager(manager.Manager): """This call passes stright through to the virtualization driver.""" yield self.driver.refresh_security_group(security_group_id) + # TODO(eday): network_topic arg should go away once we push network + # allocation into the scheduler or compute worker. + def create_instances(self, context, instance_type, image_service, image_id, + network_topic, min_count=1, max_count=1, + kernel_id=None, ramdisk_id=None, name='', + description='', user_data='', key_name=None, + key_data=None, security_group='default', + generate_hostname=generate_default_hostname): + """Create the number of instances requested if quote and + other arguments check out ok.""" + + num_instances = quota.allowed_instances(context, max_count, + instance_type) + if num_instances < min_count: + logging.warn("Quota exceeeded for %s, tried to run %s instances", + context.project_id, min_count) + raise quota.QuotaError("Instance quota exceeded. You can only " + "run %s more instances of this type." % + num_instances, "InstanceLimitExceeded") + + is_vpn = image_id == FLAGS.vpn_image_id + if not is_vpn: + image = image_service.show(context, image_id) + if not image: + raise Exception("Image not found") + if kernel_id is None: + kernel_id = image.get('kernelId', FLAGS.default_kernel) + if ramdisk_id is None: + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # Make sure we have access to kernel and ramdisk + image_service.show(context, kernel_id) + image_service.show(context, ramdisk_id) + + if security_group is None: + security_group = ['default'] + if not type(security_group) is list: + security_group = [security_group] + + security_groups = [] + self.ensure_default_security_group(context) + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + + if key_data is None and key_name: + key_pair = db.key_pair_get(context, context.user_id, key_name) + key_data = key_pair['public_key'] + + type_data = instance_types.INSTANCE_TYPES[instance_type] + base_options = { + 'reservation_id': utils.generate_uid('r'), + 'server_name': name, + 'image_id': image_id, + 'kernel_id': kernel_id, + 'ramdisk_id': ramdisk_id, + 'state_description': 'scheduling', + 'user_id': context.user_id, + 'project_id': context.project_id, + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': instance_type, + 'memory_mb': type_data['memory_mb'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + 'display_name': name, + 'display_description': description, + 'key_name': key_name, + 'key_data': key_data} + + elevated = context.elevated() + instances = [] + logging.debug("Going to run %s instances...", num_instances) + for num in 
range(num_instances): + instance = dict(mac_address=utils.generate_mac(), + launch_index=num, + **base_options) + instance_ref = self.create_instance(context, security_groups, + **instance) + instance_id = instance_ref['id'] + internal_id = instance_ref['internal_id'] + hostname = generate_hostname(internal_id) + self.update_instance(context, instance_id, hostname=hostname) + instances.append(dict(id=instance_id, internal_id=internal_id, + hostname=hostname, **instance)) + + # TODO(vish): This probably should be done in the scheduler + # or in compute as a call. The network should be + # allocated after the host is assigned and setup + # can happen at the same time. + address = self.network_manager.allocate_fixed_ip(context, + instance_id, + is_vpn) + rpc.cast(elevated, + network_topic, + {"method": "setup_fixed_ip", + "args": {"address": address}}) + + logging.debug("Casting to scheduler for %s/%s's instance %s" % + (context.project_id, context.user_id, instance_id)) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "run_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id}}) + + return instances + + def ensure_default_security_group(self, context): + try: + db.security_group_get_by_name(context, context.project_id, + 'default') + except exception.NotFound: + values = {'name': 'default', + 'description': 'default', + 'user_id': context.user_id, + 'project_id': context.project_id} + group = db.security_group_create(context, values) + def create_instance(self, context, security_groups=None, **kwargs): """Creates the instance in the datastore and returns the new instance as a mapping diff --git a/nova/quota.py b/nova/quota.py index 01dd0ecd4547..f6ca9f77c733 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -94,3 +94,8 @@ def allowed_floating_ips(context, num_floating_ips): quota = get_quota(context, project_id) allowed_floating_ips = quota['floating_ips'] - used_floating_ips return min(num_floating_ips, allowed_floating_ips) + + +class QuotaError(exception.ApiError): + """Quota Exceeeded""" + pass diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 639a2ebe4e4e..e819fbc17526 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -73,7 +73,7 @@ def stub_out_key_pair_funcs(stubs): def stub_out_image_service(stubs): - def fake_image_show(meh, id): + def fake_image_show(meh, context, id): return dict(kernelId=1, ramdiskId=1) stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8cfc6c45a330..0d540c0375d7 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -43,6 +43,10 @@ def return_servers(context, user_id=1): return [stub_instance(i, user_id) for i in xrange(5)] +def return_security_group(context, instance_id, security_group_id): + pass + + def stub_instance(id, user_id=1): return Instance(id=id, state=0, image_id=10, server_name='server%s' % id, user_id=user_id) @@ -63,6 +67,8 @@ class ServersTest(unittest.TestCase): return_server) self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers) + self.stubs.Set(nova.db.api, 'instance_add_security_group', + return_security_group) def tearDown(self): self.stubs.UnsetAll() diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py index b7c1d2acc01e..1966b51f7b55 100644 --- a/nova/tests/quota_unittest.py +++ b/nova/tests/quota_unittest.py @@ -94,11 
+94,12 @@ class QuotaTestCase(test.TrialTestCase): for i in range(FLAGS.quota_instances): instance_id = self._create_instance() instance_ids.append(instance_id) - self.assertRaises(cloud.QuotaError, self.cloud.run_instances, + self.assertRaises(quota.QuotaError, self.cloud.run_instances, self.context, min_count=1, max_count=1, - instance_type='m1.small') + instance_type='m1.small', + image_id='fake') for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -106,11 +107,12 @@ class QuotaTestCase(test.TrialTestCase): instance_ids = [] instance_id = self._create_instance(cores=4) instance_ids.append(instance_id) - self.assertRaises(cloud.QuotaError, self.cloud.run_instances, + self.assertRaises(quota.QuotaError, self.cloud.run_instances, self.context, min_count=1, max_count=1, - instance_type='m1.small') + instance_type='m1.small', + image_id='fake') for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -119,7 +121,7 @@ class QuotaTestCase(test.TrialTestCase): for i in range(FLAGS.quota_volumes): volume_id = self._create_volume() volume_ids.append(volume_id) - self.assertRaises(cloud.QuotaError, self.cloud.create_volume, + self.assertRaises(quota.QuotaError, self.cloud.create_volume, self.context, size=10) for volume_id in volume_ids: @@ -129,7 +131,7 @@ class QuotaTestCase(test.TrialTestCase): volume_ids = [] volume_id = self._create_volume(size=20) volume_ids.append(volume_id) - self.assertRaises(cloud.QuotaError, + self.assertRaises(quota.QuotaError, self.cloud.create_volume, self.context, size=10) @@ -146,6 +148,6 @@ class QuotaTestCase(test.TrialTestCase): # make an rpc.call, the test just finishes with OK. It # appears to be something in the magic inline callbacks # that is breaking. - self.assertRaises(cloud.QuotaError, self.cloud.allocate_address, + self.assertRaises(quota.QuotaError, self.cloud.allocate_address, self.context) db.floating_ip_destroy(context.get_admin_context(), address) From 7d771bf9c549499c0a138ea991da5df537e0dd88 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 24 Nov 2010 15:16:23 -0800 Subject: [PATCH 18/83] The image server should throw not found errors, don't need to check in compute manager. --- nova/compute/manager.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index cfc3d4bbd960..3f870f866622 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -117,8 +117,6 @@ class ComputeManager(manager.Manager): is_vpn = image_id == FLAGS.vpn_image_id if not is_vpn: image = image_service.show(context, image_id) - if not image: - raise Exception("Image not found") if kernel_id is None: kernel_id = image.get('kernelId', FLAGS.default_kernel) if ramdisk_id is None: From 725a1f638b01985a2ae9a4f0a68f16ef31914a51 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 26 Nov 2010 17:01:50 +0000 Subject: [PATCH 19/83] This modification should have occured in a different branch. Reverting. 
--- nova/auth/ldapdriver.py | 110 ++++++++++------------------------------ 1 file changed, 26 insertions(+), 84 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 95519d000576..ceade1d65cf4 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -40,8 +40,6 @@ flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') -flags.DEFINE_boolean('ldap_user_modify_only', False, - 'Modify attributes for users instead of creating/deleting') flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com', 'OU for Projects') flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com', @@ -91,7 +89,8 @@ class LdapDriver(object): def get_user(self, uid): """Retrieve user by id""" - attr = self.__get_ldap_user(uid) + attr = self.__find_object(self.__uid_to_dn(uid), + '(objectclass=novaUser)') return self.__to_user(attr) def get_user_from_access_key(self, access): @@ -111,12 +110,7 @@ class LdapDriver(object): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, '(objectclass=novaUser)') - users = [] - for attr in attrs: - user = self.__to_user(attr) - if user != None: - users.append(user) - return users + return [self.__to_user(attr) for attr in attrs] def get_projects(self, uid=None): """Retrieve list of projects""" @@ -131,46 +125,21 @@ class LdapDriver(object): """Create a user""" if self.__user_exists(name): raise exception.Duplicate("LDAP user %s already exists" % name) - if FLAGS.ldap_user_modify_only: - if self.__ldap_user_exists(name): - # Retrieve user by name - user = self.__get_ldap_user(name) - if user.has_key('accessKey') and user.has_key('secretKey') and user.has_key('isAdmin'): - raise exception.Duplicate("LDAP user %s already exists" % name) - else: - # Entry could be malformed, test for missing attrs. - # Malformed entries are useless, replace attributes found. 
- attr = [] - if user.has_key('secretKey'): - attr.append((self.ldap.MOD_REPLACE, 'secretKey', [secret_key])) - else: - attr.append((self.ldap.MOD_ADD, 'secretKey', [secret_key])) - if user.has_key('accessKey'): - attr.append((self.ldap.MOD_REPLACE, 'accessKey', [access_key])) - else: - attr.append((self.ldap.MOD_ADD, 'accessKey', [access_key])) - if user.has_key('isAdmin'): - attr.append((self.ldap.MOD_REPLACE, 'isAdmin', [str(is_admin).upper()])) - else: - attr.append((self.ldap.MOD_ADD, 'isAdmin', [str(is_admin).upper()])) - self.conn.modify_s(self.__uid_to_dn(name), attr) - return self.get_user(name) - else: - attr = [ - ('objectclass', ['person', - 'organizationalPerson', - 'inetOrgPerson', - 'novaUser']), - ('ou', [FLAGS.ldap_user_unit]), - ('uid', [name]), - ('sn', [name]), - ('cn', [name]), - ('secretKey', [secret_key]), - ('accessKey', [access_key]), - ('isAdmin', [str(is_admin).upper()]), - ] - self.conn.add_s(self.__uid_to_dn(name), attr) - return self.__to_user(dict(attr)) + attr = [ + ('objectclass', ['person', + 'organizationalPerson', + 'inetOrgPerson', + 'novaUser']), + ('ou', [FLAGS.ldap_user_unit]), + ('uid', [name]), + ('sn', [name]), + ('cn', [name]), + ('secretKey', [secret_key]), + ('accessKey', [access_key]), + ('isAdmin', [str(is_admin).upper()]), + ] + self.conn.add_s(self.__uid_to_dn(name), attr) + return self.__to_user(dict(attr)) def create_project(self, name, manager_uid, description=None, member_uids=None): @@ -287,21 +256,7 @@ class LdapDriver(object): if not self.__user_exists(uid): raise exception.NotFound("User %s doesn't exist" % uid) self.__remove_from_all(uid) - if FLAGS.ldap_user_modify_only: - # Delete attributes - attr = [] - # Retrieve user by name - user = self.__get_ldap_user(uid) - if user.has_key('secretKey'): - attr.append((self.ldap.MOD_DELETE, 'secretKey', user['secretKey'])) - if user.has_key('accessKey'): - attr.append((self.ldap.MOD_DELETE, 'accessKey', user['accessKey'])) - if user.has_key('isAdmin'): - attr.append((self.ldap.MOD_DELETE, 'isAdmin', user['isAdmin'])) - self.conn.modify_s(self.__uid_to_dn(uid), attr) - else: - # Delete entry - self.conn.delete_s(self.__uid_to_dn(uid)) + self.conn.delete_s(self.__uid_to_dn(uid)) def delete_project(self, project_id): """Delete a project""" @@ -310,7 +265,7 @@ class LdapDriver(object): self.__delete_group(project_dn) def modify_user(self, uid, access_key=None, secret_key=None, admin=None): - """Modify an existing user""" + """Modify an existing project""" if not access_key and not secret_key and admin is None: return attr = [] @@ -326,20 +281,10 @@ class LdapDriver(object): """Check if user exists""" return self.get_user(uid) != None - def __ldap_user_exists(self, uid): - """Check if the user exists in ldap""" - return self.__get_ldap_user(uid) != None - def __project_exists(self, project_id): """Check if project exists""" return self.get_project(project_id) != None - def __get_ldap_user(self, uid): - """Retrieve LDAP user entry by id""" - attr = self.__find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') - return attr - def __find_object(self, dn, query=None, scope=None): """Find an object by dn and query""" objects = self.__find_objects(dn, query, scope) @@ -504,15 +449,12 @@ class LdapDriver(object): """Convert ldap attributes to User object""" if attr == None: return None - if (attr.has_key('accessKey') and attr.has_key('secretKey') and attr.has_key('isAdmin')): - return { - 'id': attr['uid'][0], - 'name': attr['cn'][0], - 'access': attr['accessKey'][0], - 'secret': 
attr['secretKey'][0], - 'admin': (attr['isAdmin'][0] == 'TRUE')} - else: - return None + return { + 'id': attr['uid'][0], + 'name': attr['cn'][0], + 'access': attr['accessKey'][0], + 'secret': attr['secretKey'][0], + 'admin': (attr['isAdmin'][0] == 'TRUE')} def __to_project(self, attr): """Convert ldap attributes to Project object""" From c3072aea3dc5d44d26fcac5c7db65b8cc445fccc Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 26 Nov 2010 17:04:27 +0000 Subject: [PATCH 20/83] Adding support for modification only of user accounts. --- nova/auth/ldapdriver.py | 110 ++++++++++++++++++++++++++++++---------- 1 file changed, 84 insertions(+), 26 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index ceade1d65cf4..95519d000576 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -40,6 +40,8 @@ flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') +flags.DEFINE_boolean('ldap_user_modify_only', False, + 'Modify attributes for users instead of creating/deleting') flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com', 'OU for Projects') flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com', @@ -89,8 +91,7 @@ class LdapDriver(object): def get_user(self, uid): """Retrieve user by id""" - attr = self.__find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') + attr = self.__get_ldap_user(uid) return self.__to_user(attr) def get_user_from_access_key(self, access): @@ -110,7 +111,12 @@ class LdapDriver(object): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, '(objectclass=novaUser)') - return [self.__to_user(attr) for attr in attrs] + users = [] + for attr in attrs: + user = self.__to_user(attr) + if user != None: + users.append(user) + return users def get_projects(self, uid=None): """Retrieve list of projects""" @@ -125,21 +131,46 @@ class LdapDriver(object): """Create a user""" if self.__user_exists(name): raise exception.Duplicate("LDAP user %s already exists" % name) - attr = [ - ('objectclass', ['person', - 'organizationalPerson', - 'inetOrgPerson', - 'novaUser']), - ('ou', [FLAGS.ldap_user_unit]), - ('uid', [name]), - ('sn', [name]), - ('cn', [name]), - ('secretKey', [secret_key]), - ('accessKey', [access_key]), - ('isAdmin', [str(is_admin).upper()]), - ] - self.conn.add_s(self.__uid_to_dn(name), attr) - return self.__to_user(dict(attr)) + if FLAGS.ldap_user_modify_only: + if self.__ldap_user_exists(name): + # Retrieve user by name + user = self.__get_ldap_user(name) + if user.has_key('accessKey') and user.has_key('secretKey') and user.has_key('isAdmin'): + raise exception.Duplicate("LDAP user %s already exists" % name) + else: + # Entry could be malformed, test for missing attrs. + # Malformed entries are useless, replace attributes found. 
+ attr = [] + if user.has_key('secretKey'): + attr.append((self.ldap.MOD_REPLACE, 'secretKey', [secret_key])) + else: + attr.append((self.ldap.MOD_ADD, 'secretKey', [secret_key])) + if user.has_key('accessKey'): + attr.append((self.ldap.MOD_REPLACE, 'accessKey', [access_key])) + else: + attr.append((self.ldap.MOD_ADD, 'accessKey', [access_key])) + if user.has_key('isAdmin'): + attr.append((self.ldap.MOD_REPLACE, 'isAdmin', [str(is_admin).upper()])) + else: + attr.append((self.ldap.MOD_ADD, 'isAdmin', [str(is_admin).upper()])) + self.conn.modify_s(self.__uid_to_dn(name), attr) + return self.get_user(name) + else: + attr = [ + ('objectclass', ['person', + 'organizationalPerson', + 'inetOrgPerson', + 'novaUser']), + ('ou', [FLAGS.ldap_user_unit]), + ('uid', [name]), + ('sn', [name]), + ('cn', [name]), + ('secretKey', [secret_key]), + ('accessKey', [access_key]), + ('isAdmin', [str(is_admin).upper()]), + ] + self.conn.add_s(self.__uid_to_dn(name), attr) + return self.__to_user(dict(attr)) def create_project(self, name, manager_uid, description=None, member_uids=None): @@ -256,7 +287,21 @@ class LdapDriver(object): if not self.__user_exists(uid): raise exception.NotFound("User %s doesn't exist" % uid) self.__remove_from_all(uid) - self.conn.delete_s(self.__uid_to_dn(uid)) + if FLAGS.ldap_user_modify_only: + # Delete attributes + attr = [] + # Retrieve user by name + user = self.__get_ldap_user(uid) + if user.has_key('secretKey'): + attr.append((self.ldap.MOD_DELETE, 'secretKey', user['secretKey'])) + if user.has_key('accessKey'): + attr.append((self.ldap.MOD_DELETE, 'accessKey', user['accessKey'])) + if user.has_key('isAdmin'): + attr.append((self.ldap.MOD_DELETE, 'isAdmin', user['isAdmin'])) + self.conn.modify_s(self.__uid_to_dn(uid), attr) + else: + # Delete entry + self.conn.delete_s(self.__uid_to_dn(uid)) def delete_project(self, project_id): """Delete a project""" @@ -265,7 +310,7 @@ class LdapDriver(object): self.__delete_group(project_dn) def modify_user(self, uid, access_key=None, secret_key=None, admin=None): - """Modify an existing project""" + """Modify an existing user""" if not access_key and not secret_key and admin is None: return attr = [] @@ -281,10 +326,20 @@ class LdapDriver(object): """Check if user exists""" return self.get_user(uid) != None + def __ldap_user_exists(self, uid): + """Check if the user exists in ldap""" + return self.__get_ldap_user(uid) != None + def __project_exists(self, project_id): """Check if project exists""" return self.get_project(project_id) != None + def __get_ldap_user(self, uid): + """Retrieve LDAP user entry by id""" + attr = self.__find_object(self.__uid_to_dn(uid), + '(objectclass=novaUser)') + return attr + def __find_object(self, dn, query=None, scope=None): """Find an object by dn and query""" objects = self.__find_objects(dn, query, scope) @@ -449,12 +504,15 @@ class LdapDriver(object): """Convert ldap attributes to User object""" if attr == None: return None - return { - 'id': attr['uid'][0], - 'name': attr['cn'][0], - 'access': attr['accessKey'][0], - 'secret': attr['secretKey'][0], - 'admin': (attr['isAdmin'][0] == 'TRUE')} + if (attr.has_key('accessKey') and attr.has_key('secretKey') and attr.has_key('isAdmin')): + return { + 'id': attr['uid'][0], + 'name': attr['cn'][0], + 'access': attr['accessKey'][0], + 'secret': attr['secretKey'][0], + 'admin': (attr['isAdmin'][0] == 'TRUE')} + else: + return None def __to_project(self, attr): """Convert ldap attributes to Project object""" From 8a7e6e0f003e1b3837b918ac9af1564ac1665aae Mon Sep 17 
00:00:00 2001 From: Ryan Lane Date: Fri, 26 Nov 2010 17:59:48 +0000 Subject: [PATCH 21/83] PEP fixes --- nova/auth/ldapdriver.py | 72 ++++++++++++++++++++++++----------------- 1 file changed, 42 insertions(+), 30 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 95519d000576..fa48c8435ff3 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -91,7 +91,7 @@ class LdapDriver(object): def get_user(self, uid): """Retrieve user by id""" - attr = self.__get_ldap_user(uid) + attr = self.__get_ldap_user(uid) return self.__to_user(attr) def get_user_from_access_key(self, access): @@ -111,11 +111,11 @@ class LdapDriver(object): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, '(objectclass=novaUser)') - users = [] - for attr in attrs: - user = self.__to_user(attr) - if user != None: - users.append(user) + users = [] + for attr in attrs: + user = self.__to_user(attr) + if user is not None: + users.append(user) return users def get_projects(self, uid=None): @@ -135,24 +135,32 @@ class LdapDriver(object): if self.__ldap_user_exists(name): # Retrieve user by name user = self.__get_ldap_user(name) - if user.has_key('accessKey') and user.has_key('secretKey') and user.has_key('isAdmin'): - raise exception.Duplicate("LDAP user %s already exists" % name) + if user.has_key('accessKey') and user.has_key('secretKey') \ + and user.has_key('isAdmin'): + raise exception.Duplicate("LDAP user %s already exists" \ + % name) else: # Entry could be malformed, test for missing attrs. # Malformed entries are useless, replace attributes found. attr = [] if user.has_key('secretKey'): - attr.append((self.ldap.MOD_REPLACE, 'secretKey', [secret_key])) + attr.append((self.ldap.MOD_REPLACE, 'secretKey', \ + [secret_key])) else: - attr.append((self.ldap.MOD_ADD, 'secretKey', [secret_key])) + attr.append((self.ldap.MOD_ADD, 'secretKey', \ + [secret_key])) if user.has_key('accessKey'): - attr.append((self.ldap.MOD_REPLACE, 'accessKey', [access_key])) + attr.append((self.ldap.MOD_REPLACE, 'accessKey', \ + [access_key])) else: - attr.append((self.ldap.MOD_ADD, 'accessKey', [access_key])) + attr.append((self.ldap.MOD_ADD, 'accessKey', \ + [access_key])) if user.has_key('isAdmin'): - attr.append((self.ldap.MOD_REPLACE, 'isAdmin', [str(is_admin).upper()])) + attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \ + [str(is_admin).upper()])) else: - attr.append((self.ldap.MOD_ADD, 'isAdmin', [str(is_admin).upper()])) + attr.append((self.ldap.MOD_ADD, 'isAdmin', \ + [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: @@ -186,7 +194,7 @@ class LdapDriver(object): if description is None: description = name members = [] - if member_uids != None: + if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): raise exception.NotFound("Project can't be created " @@ -293,11 +301,14 @@ class LdapDriver(object): # Retrieve user by name user = self.__get_ldap_user(uid) if user.has_key('secretKey'): - attr.append((self.ldap.MOD_DELETE, 'secretKey', user['secretKey'])) + attr.append((self.ldap.MOD_DELETE, 'secretKey', \ + user['secretKey'])) if user.has_key('accessKey'): - attr.append((self.ldap.MOD_DELETE, 'accessKey', user['accessKey'])) + attr.append((self.ldap.MOD_DELETE, 'accessKey', \ + user['accessKey'])) if user.has_key('isAdmin'): - attr.append((self.ldap.MOD_DELETE, 'isAdmin', user['isAdmin'])) + attr.append((self.ldap.MOD_DELETE, 'isAdmin', \ + user['isAdmin'])) 
self.conn.modify_s(self.__uid_to_dn(uid), attr) else: # Delete entry @@ -324,18 +335,18 @@ class LdapDriver(object): def __user_exists(self, uid): """Check if user exists""" - return self.get_user(uid) != None + return self.get_user(uid) is not None def __ldap_user_exists(self, uid): """Check if the user exists in ldap""" - return self.__get_ldap_user(uid) != None + return self.__get_ldap_user(uid) is not None def __project_exists(self, project_id): """Check if project exists""" - return self.get_project(project_id) != None + return self.get_project(project_id) is not None def __get_ldap_user(self, uid): - """Retrieve LDAP user entry by id""" + """Retrieve LDAP user entry by id""" attr = self.__find_object(self.__uid_to_dn(uid), '(objectclass=novaUser)') return attr @@ -385,12 +396,12 @@ class LdapDriver(object): def __group_exists(self, dn): """Check if group exists""" - return self.__find_object(dn, '(objectclass=groupOfNames)') != None + return self.__find_object(dn, '(objectclass=groupOfNames)') is not None @staticmethod def __role_to_dn(role, project_id=None): """Convert role to corresponding dn""" - if project_id == None: + if project_id is None: return FLAGS.__getitem__("ldap_%s" % role).value else: return 'cn=%s,cn=%s,%s' % (role, @@ -404,7 +415,7 @@ class LdapDriver(object): raise exception.Duplicate("Group can't be created because " "group %s already exists" % name) members = [] - if member_uids != None: + if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): raise exception.NotFound("Group can't be created " @@ -430,7 +441,7 @@ class LdapDriver(object): res = self.__find_object(group_dn, '(member=%s)' % self.__uid_to_dn(uid), self.ldap.SCOPE_BASE) - return res != None + return res is not None def __add_to_group(self, uid, group_dn): """Add user to group""" @@ -502,21 +513,22 @@ class LdapDriver(object): @staticmethod def __to_user(attr): """Convert ldap attributes to User object""" - if attr == None: + if attr is None: return None - if (attr.has_key('accessKey') and attr.has_key('secretKey') and attr.has_key('isAdmin')): + if (attr.has_key('accessKey') and attr.has_key('secretKey') \ + and attr.has_key('isAdmin')): return { 'id': attr['uid'][0], 'name': attr['cn'][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], 'admin': (attr['isAdmin'][0] == 'TRUE')} - else: + else: return None def __to_project(self, attr): """Convert ldap attributes to Project object""" - if attr == None: + if attr is None: return None member_dns = attr.get('member', []) return { From a44ee54dfe3f243a44636e9224082e86fdee452f Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Sat, 27 Nov 2010 12:56:19 +0000 Subject: [PATCH 22/83] first cut of the refactoring of the XenAPIConnection class. Currently the class merged both the code for managing the XenAPI connection and the business logic for implementing Nova operations. If left like this, it would eventually become difficult to read, maintain and extend. The file was getting kind of big and cluttered, so a quick refactoring now will save a lot of headaches later. 
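To orient the reader before the long diff that follows, here is a compact, illustrative sketch of the layering this commit message describes: a thin XenAPIConnection facade that keeps the virt-driver interface stable while delegating session plumbing to XenAPISession and business logic to VMOps/VolumeOps. The class names follow the patch, but the method bodies are placeholders; this is not the code the patch adds.

class XenAPISession(object):
    """Owns the raw XenAPI connection; knows nothing about Nova instances."""

    def __init__(self, url, user, password):
        # Placeholder: the real class logs in via XenAPI.Session(url).
        self.url = url
        self.user = user
        self.password = password


class VMOps(object):
    """Instance lifecycle operations, expressed in terms of the session."""

    def __init__(self, session):
        self._session = session

    def spawn(self, instance):
        pass  # placeholder: create the VM/VBD/VIF records through self._session


class VolumeOps(object):
    """Volume attach/detach operations."""

    def __init__(self, session):
        self._session = session

    def attach_volume(self, instance_name, device_path, mountpoint):
        pass  # placeholder


class XenAPIConnection(object):
    """Thin facade: the interface nova.compute sees does not change."""

    def __init__(self, url, user, password):
        self._session = XenAPISession(url, user, password)
        self._vmops = VMOps(self._session)
        self._volumeops = VolumeOps(self._session)

    def spawn(self, instance):
        return self._vmops.spawn(instance)

    def attach_volume(self, instance_name, device_path, mountpoint):
        return self._volumeops.attach_volume(instance_name, device_path,
                                             mountpoint)

The point of keeping the facade free of business logic, as the message argues, is that future XenAPI features can land in the ops classes without growing the connection module again.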
--- nova/virt/xenapi.py | 321 ++++------------------- nova/virt/xenapi/power_state.py | 26 ++ nova/virt/xenapi/xenapi.py | 439 ++++++++++++++++++++++++++++++++ 3 files changed, 514 insertions(+), 272 deletions(-) create mode 100644 nova/virt/xenapi/power_state.py create mode 100644 nova/virt/xenapi/xenapi.py diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 3169562a5318..93c11920567d 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -54,14 +54,9 @@ from twisted.internet import defer from twisted.internet import reactor from twisted.internet import task -from nova import db -from nova import flags -from nova import process -from nova import utils -from nova.auth.manager import AuthManager -from nova.compute import instance_types -from nova.compute import power_state -from nova.virt import images +from xenapi import power_state +from xenapi import vmops +from xenapi import volumeops XenAPI = None @@ -86,14 +81,6 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'connection_type=xenapi.') -XENAPI_POWER_STATE = { - 'Halted': power_state.SHUTDOWN, - 'Running': power_state.RUNNING, - 'Paused': power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed': power_state.CRASHED} - - def get_connection(_): """Note that XenAPI doesn't have a read-only connection mode, so the read_only parameter is ignored.""" @@ -115,273 +102,83 @@ def get_connection(_): class XenAPIConnection(object): def __init__(self, url, user, pw): - self._conn = XenAPI.Session(url) - self._conn.login_with_password(user, pw) - + self._session = XenAPISession(url, user, pw) + self._vmops = VMOps(sef._session) + self._volumeops = volumeOps(self._session) + def list_instances(self): - return [self._conn.xenapi.VM.get_name_label(vm) \ - for vm in self._conn.xenapi.VM.get_all()] - - @defer.inlineCallbacks + return self._vmops.list_instances() + def spawn(self, instance): - vm = yield self._lookup(instance.name) - if vm is not None: - raise Exception('Attempted to create non-unique name %s' % - instance.name) - - network = db.project_get_network(None, instance.project_id) - network_ref = \ - yield self._find_network_with_bridge(network.bridge) - - user = AuthManager().get_user(instance.user_id) - project = AuthManager().get_project(instance.project_id) - vdi_uuid = yield self._fetch_image( - instance.image_id, user, project, True) - kernel = yield self._fetch_image( - instance.kernel_id, user, project, False) - ramdisk = yield self._fetch_image( - instance.ramdisk_id, user, project, False) - vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) - - vm_ref = yield self._create_vm(instance, kernel, ramdisk) - yield self._create_vbd(vm_ref, vdi_ref, 0, True) - if network_ref: - yield self._create_vif(vm_ref, network_ref, instance.mac_address) - logging.debug('Starting VM %s...', vm_ref) - yield self._call_xenapi('VM.start', vm_ref, False, False) - logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - - @defer.inlineCallbacks - def _create_vm(self, instance, kernel, ramdisk): - """Create a VM record. 
Returns a Deferred that gives the new - VM reference.""" - - instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] - mem = str(long(instance_type['memory_mb']) * 1024 * 1024) - vcpus = str(instance_type['vcpus']) - rec = { - 'name_label': instance.name, - 'name_description': '', - 'is_a_template': False, - 'memory_static_min': '0', - 'memory_static_max': mem, - 'memory_dynamic_min': mem, - 'memory_dynamic_max': mem, - 'VCPUs_at_startup': vcpus, - 'VCPUs_max': vcpus, - 'VCPUs_params': {}, - 'actions_after_shutdown': 'destroy', - 'actions_after_reboot': 'restart', - 'actions_after_crash': 'destroy', - 'PV_bootloader': '', - 'PV_kernel': kernel, - 'PV_ramdisk': ramdisk, - 'PV_args': 'root=/dev/xvda1', - 'PV_bootloader_args': '', - 'PV_legacy_args': '', - 'HVM_boot_policy': '', - 'HVM_boot_params': {}, - 'platform': {}, - 'PCI_bus': '', - 'recommendations': '', - 'affinity': '', - 'user_version': '0', - 'other_config': {}, - } - logging.debug('Created VM %s...', instance.name) - vm_ref = yield self._call_xenapi('VM.create', rec) - logging.debug('Created VM %s as %s.', instance.name, vm_ref) - defer.returnValue(vm_ref) - - @defer.inlineCallbacks - def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): - """Create a VBD record. Returns a Deferred that gives the new - VBD reference.""" - - vbd_rec = {} - vbd_rec['VM'] = vm_ref - vbd_rec['VDI'] = vdi_ref - vbd_rec['userdevice'] = str(userdevice) - vbd_rec['bootable'] = bootable - vbd_rec['mode'] = 'RW' - vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False - vbd_rec['other_config'] = {} - vbd_rec['qos_algorithm_type'] = '' - vbd_rec['qos_algorithm_params'] = {} - vbd_rec['qos_supported_algorithms'] = [] - logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec) - logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, - vdi_ref) - defer.returnValue(vbd_ref) - - @defer.inlineCallbacks - def _create_vif(self, vm_ref, network_ref, mac_address): - """Create a VIF record. Returns a Deferred that gives the new - VIF reference.""" - - vif_rec = {} - vif_rec['device'] = '0' - vif_rec['network'] = network_ref - vif_rec['VM'] = vm_ref - vif_rec['MAC'] = mac_address - vif_rec['MTU'] = '1500' - vif_rec['other_config'] = {} - vif_rec['qos_algorithm_type'] = '' - vif_rec['qos_algorithm_params'] = {} - logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, - network_ref) - vif_ref = yield self._call_xenapi('VIF.create', vif_rec) - logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, - vm_ref, network_ref) - defer.returnValue(vif_ref) - - @defer.inlineCallbacks - def _find_network_with_bridge(self, bridge): - expr = 'field "bridge" = "%s"' % bridge - networks = yield self._call_xenapi('network.get_all_records_where', - expr) - if len(networks) == 1: - defer.returnValue(networks.keys()[0]) - elif len(networks) > 1: - raise Exception('Found non-unique network for bridge %s' % bridge) - else: - raise Exception('Found no network for bridge %s' % bridge) - - @defer.inlineCallbacks - def _fetch_image(self, image, user, project, use_sr): - """use_sr: True to put the image as a VDI in an SR, False to place - it on dom0's filesystem. The former is for VM disks, the latter for - its kernel and ramdisk (if external kernels are being used). 
- Returns a Deferred that gives the new VDI UUID.""" - - url = images.image_url(image) - access = AuthManager().get_access_key(user, project) - logging.debug("Asking xapi to fetch %s as %s" % (url, access)) - fn = use_sr and 'get_vdi' or 'get_kernel' - args = {} - args['src_url'] = url - args['username'] = access - args['password'] = user.secret - if use_sr: - args['add_partition'] = 'true' - task = yield self._async_call_plugin('objectstore', fn, args) - uuid = yield self._wait_for_task(task) - defer.returnValue(uuid) - - @defer.inlineCallbacks + self._vmops.spawn(instance) + def reboot(self, instance): - vm = yield self._lookup(instance.name) - if vm is None: - raise Exception('instance not present %s' % instance.name) - task = yield self._call_xenapi('Async.VM.clean_reboot', vm) - yield self._wait_for_task(task) + self._vmops.reboot(instance) - @defer.inlineCallbacks def destroy(self, instance): - vm = yield self._lookup(instance.name) - if vm is None: - # Don't complain, just return. This lets us clean up instances - # that have already disappeared from the underlying platform. - defer.returnValue(None) - # Get the VDIs related to the VM - vdis = yield self._lookup_vm_vdis(vm) - try: - task = yield self._call_xenapi('Async.VM.hard_shutdown', vm) - yield self._wait_for_task(task) - except Exception, exc: - logging.warn(exc) - # Disk clean-up - if vdis: - for vdi in vdis: - try: - task = yield self._call_xenapi('Async.VDI.destroy', vdi) - yield self._wait_for_task(task) - except Exception, exc: - logging.warn(exc) - try: - task = yield self._call_xenapi('Async.VM.destroy', vm) - yield self._wait_for_task(task) - except Exception, exc: - logging.warn(exc) - + self._vmops.destroy(instance) + def get_info(self, instance_id): - vm = self._lookup_blocking(instance_id) - if vm is None: - raise Exception('instance not present %s' % instance_id) - rec = self._conn.xenapi.VM.get_record(vm) - return {'state': XENAPI_POWER_STATE[rec['power_state']], - 'max_mem': long(rec['memory_static_max']) >> 10, - 'mem': long(rec['memory_dynamic_max']) >> 10, - 'num_cpu': rec['VCPUs_max'], - 'cpu_time': 0} - + return self._vmops.get_info(instance_id) + def get_console_output(self, instance): - return 'FAKE CONSOLE OUTPUT' + return self._vmops.get_console_output(instance) + + def attach_volume(self, instance_name, device_path, mountpoint): + return self._volumeops.attach_volume(instance_name, device_path, mountpoint) + + def detach_volume(self, instance_name, mountpoint): + return self._volumeops.detach_volume(instance_name, mountpoint) + + +class XenAPISession(object): + def __init__(self, url, user, pw): + self._session = XenAPI.Session(url) + self._session.login_with_password(user, pw) @utils.deferredToThread - def _lookup(self, i): - return self._lookup_blocking(i) - - def _lookup_blocking(self, i): - vms = self._conn.xenapi.VM.get_by_name_label(i) - n = len(vms) - if n == 0: - return None - elif n > 1: - raise Exception('duplicate name found: %s' % i) - else: - return vms[0] + def call_xenapi(self, method, *args): + """Call the specified XenAPI method on a background thread. Returns + a Deferred for the result.""" + f = self._session.xenapi + for m in method.split('.'): + f = f.__getattr__(m) + return f(*args) @utils.deferredToThread - def _lookup_vm_vdis(self, vm): - return self._lookup_vm_vdis_blocking(vm) + def async_call_plugin(self, plugin, fn, args): + """Call Async.host.call_plugin on a background thread. 
Returns a + Deferred with the task reference.""" + return _unwrap_plugin_exceptions( + self._session.xenapi.Async.host.call_plugin, + self._get_xenapi_host(), plugin, fn, args) - def _lookup_vm_vdis_blocking(self, vm): - # Firstly we get the VBDs, then the VDIs. - # TODO: do we leave the read-only devices? - vbds = self._conn.xenapi.VM.get_VBDs(vm) - vdis = [] - if vbds: - for vbd in vbds: - try: - vdi = self._conn.xenapi.VBD.get_VDI(vbd) - # Test valid VDI - record = self._conn.xenapi.VDI.get_record(vdi) - except Exception, exc: - logging.warn(exc) - else: - vdis.append(vdi) - if len(vdis) > 0: - return vdis - else: - return None + def get_xenapi_host(self): + return self._session.xenapi.session.get_this_host(self._session.handle) - def _wait_for_task(self, task): + def wait_for_task(self, task): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" d = defer.Deferred() reactor.callLater(0, self._poll_task, task, d) return d - + @utils.deferredToThread def _poll_task(self, task, deferred): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" try: #logging.debug('Polling task %s...', task) - status = self._conn.xenapi.task.get_status(task) + status = self._session.xenapi.task.get_status(task) if status == 'pending': reactor.callLater(FLAGS.xenapi_task_poll_interval, self._poll_task, task, deferred) elif status == 'success': - result = self._conn.xenapi.task.get_result(task) + result = self._session.xenapi.task.get_result(task) logging.info('Task %s status: success. %s', task, result) deferred.callback(_parse_xmlrpc_value(result)) else: - error_info = self._conn.xenapi.task.get_error_info(task) + error_info = self._session.xenapi.task.get_error_info(task) logging.warn('Task %s status: %s. %s', task, status, error_info) deferred.errback(XenAPI.Failure(error_info)) @@ -390,26 +187,6 @@ class XenAPIConnection(object): logging.warn(exc) deferred.errback(exc) - @utils.deferredToThread - def _call_xenapi(self, method, *args): - """Call the specified XenAPI method on a background thread. Returns - a Deferred for the result.""" - f = self._conn.xenapi - for m in method.split('.'): - f = f.__getattr__(m) - return f(*args) - - @utils.deferredToThread - def _async_call_plugin(self, plugin, fn, args): - """Call Async.host.call_plugin on a background thread. Returns a - Deferred with the task reference.""" - return _unwrap_plugin_exceptions( - self._conn.xenapi.Async.host.call_plugin, - self._get_xenapi_host(), plugin, fn, args) - - def _get_xenapi_host(self): - return self._conn.xenapi.session.get_this_host(self._conn.handle) - def _unwrap_plugin_exceptions(func, *args, **kwargs): try: diff --git a/nova/virt/xenapi/power_state.py b/nova/virt/xenapi/power_state.py new file mode 100644 index 000000000000..d2d8fba42518 --- /dev/null +++ b/nova/virt/xenapi/power_state.py @@ -0,0 +1,26 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
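A minimal usage sketch of the XenAPISession helpers added in the hunks above (illustrative only; the function name and the vm_ref argument are assumptions, not part of the patch). call_xenapi runs a XenAPI call on a worker thread, while long-running operations go through their Async.* variants and the returned task is polled to completion with wait_for_task:

from twisted.internet import defer

@defer.inlineCallbacks
def example_clean_reboot(session, vm_ref):
    # session is assumed to be an initialized XenAPISession.
    # The Async call returns a task reference; wait_for_task() polls it
    # via reactor.callLater without blocking the reactor thread.
    task = yield session.call_xenapi('Async.VM.clean_reboot', vm_ref)
    result = yield session.wait_for_task(task)
    defer.returnValue(result)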
+ +from nova.compute import power_state + + +XENAPI_POWER_STATE = { + 'Halted': power_state.SHUTDOWN, + 'Running': power_state.RUNNING, + 'Paused': power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed': power_state.CRASHED} + \ No newline at end of file diff --git a/nova/virt/xenapi/xenapi.py b/nova/virt/xenapi/xenapi.py new file mode 100644 index 000000000000..ddbef4303fac --- /dev/null +++ b/nova/virt/xenapi/xenapi.py @@ -0,0 +1,439 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A connection to XenServer or Xen Cloud Platform. + +The concurrency model for this class is as follows: + +All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator +deferredToThread). They are remote calls, and so may hang for the usual +reasons. They should not be allowed to block the reactor thread. + +All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async +(using XenAPI.VM.async_start etc). These return a task, which can then be +polled for completion. Polling is handled using reactor.callLater. + +This combination of techniques means that we don't block the reactor thread at +all, and at the same time we don't hold lots of threads waiting for +long-running operations. + +FIXME: get_info currently doesn't conform to these rules, and will block the +reactor thread if the VM.get_by_name_label or VM.get_record calls block. + +**Related Flags** + +:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform. +:xenapi_connection_username: Username for connection to XenServer/Xen Cloud + Platform (default: root). +:xenapi_connection_password: Password for connection to XenServer/Xen Cloud + Platform. +:xenapi_task_poll_interval: The interval (seconds) used for polling of + remote tasks (Async.VM.start, etc) + (default: 0.5). + +""" + +import logging +import xmlrpclib + +from twisted.internet import defer +from twisted.internet import reactor +from twisted.internet import task + +from nova import db +from nova import flags +from nova import process +from nova import utils +from nova.auth.manager import AuthManager # wrap this one +from nova.compute import instance_types # wrap this one +from xenapi import power_state +from nova.virt import images # wrap this one + +XenAPI = None + + +FLAGS = flags.FLAGS +flags.DEFINE_string('xenapi_connection_url', + None, + 'URL for connection to XenServer/Xen Cloud Platform.' + ' Required if connection_type=xenapi.') +flags.DEFINE_string('xenapi_connection_username', + 'root', + 'Username for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') +flags.DEFINE_string('xenapi_connection_password', + None, + 'Password for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') +flags.DEFINE_float('xenapi_task_poll_interval', + 0.5, + 'The interval used for polling of remote tasks ' + '(Async.VM.start, etc). 
Used only if ' + 'connection_type=xenapi.') + + +def get_connection(_): + """Note that XenAPI doesn't have a read-only connection mode, so + the read_only parameter is ignored.""" + # This is loaded late so that there's no need to install this + # library when not using XenAPI. + global XenAPI + if XenAPI is None: + XenAPI = __import__('XenAPI') + url = FLAGS.xenapi_connection_url + username = FLAGS.xenapi_connection_username + password = FLAGS.xenapi_connection_password + if not url or password is None: + raise Exception('Must specify xenapi_connection_url, ' + 'xenapi_connection_username (optionally), and ' + 'xenapi_connection_password to use ' + 'connection_type=xenapi') + return XenAPIConnection(url, username, password) + + +class XenAPISession(object): + def __init__(self, url, user, pw): + self._session = XenAPI.Session(url) + self._session.login_with_password(user, pw) + + def session(self): + return self._session + + def list_instances(self): + return [self._session.xenapi.VM.get_name_label(vm) \ + for vm in self._session.xenapi.VM.get_all()] + + @defer.inlineCallbacks + def spawn(self, instance): + vm = yield self._lookup(instance.name) + if vm is not None: + raise Exception('Attempted to create non-unique name %s' % + instance.name) + + network = db.project_get_network(None, instance.project_id) + network_ref = \ + yield self._find_network_with_bridge(network.bridge) + + user = AuthManager().get_user(instance.user_id) + project = AuthManager().get_project(instance.project_id) + vdi_uuid = yield self._fetch_image( + instance.image_id, user, project, True) + kernel = yield self._fetch_image( + instance.kernel_id, user, project, False) + ramdisk = yield self._fetch_image( + instance.ramdisk_id, user, project, False) + vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) + + vm_ref = yield self._create_vm(instance, kernel, ramdisk) + yield self._create_vbd(vm_ref, vdi_ref, 0, True) + if network_ref: + yield self._create_vif(vm_ref, network_ref, instance.mac_address) + logging.debug('Starting VM %s...', vm_ref) + yield self._call_xenapi('VM.start', vm_ref, False, False) + logging.info('Spawning VM %s created %s.', instance.name, vm_ref) + + @defer.inlineCallbacks + def _create_vm(self, instance, kernel, ramdisk): + """Create a VM record. 
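A hypothetical configuration sketch for the flags and get_connection() defined above (the host URL and password are invented values): the URL and password flags must be set before the driver is loaded, the username defaults to 'root', and the read_only argument is ignored because XenAPI has no read-only mode.

from nova import flags

FLAGS = flags.FLAGS
FLAGS.xenapi_connection_url = 'https://xenserver.example.com'  # assumed host
FLAGS.xenapi_connection_password = 'secret'                    # assumed value
# xenapi_connection_username is left at its 'root' default.
# With these set, get_connection(None) returns a connection object wrapping
# an authenticated XenAPI session.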
Returns a Deferred that gives the new + VM reference.""" + + instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] + mem = str(long(instance_type['memory_mb']) * 1024 * 1024) + vcpus = str(instance_type['vcpus']) + rec = { + 'name_label': instance.name, + 'name_description': '', + 'is_a_template': False, + 'memory_static_min': '0', + 'memory_static_max': mem, + 'memory_dynamic_min': mem, + 'memory_dynamic_max': mem, + 'VCPUs_at_startup': vcpus, + 'VCPUs_max': vcpus, + 'VCPUs_params': {}, + 'actions_after_shutdown': 'destroy', + 'actions_after_reboot': 'restart', + 'actions_after_crash': 'destroy', + 'PV_bootloader': '', + 'PV_kernel': kernel, + 'PV_ramdisk': ramdisk, + 'PV_args': 'root=/dev/xvda1', + 'PV_bootloader_args': '', + 'PV_legacy_args': '', + 'HVM_boot_policy': '', + 'HVM_boot_params': {}, + 'platform': {}, + 'PCI_bus': '', + 'recommendations': '', + 'affinity': '', + 'user_version': '0', + 'other_config': {}, + } + logging.debug('Created VM %s...', instance.name) + vm_ref = yield self._call_xenapi('VM.create', rec) + logging.debug('Created VM %s as %s.', instance.name, vm_ref) + defer.returnValue(vm_ref) + + @defer.inlineCallbacks + def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + """Create a VBD record. Returns a Deferred that gives the new + VBD reference.""" + + vbd_rec = {} + vbd_rec['VM'] = vm_ref + vbd_rec['VDI'] = vdi_ref + vbd_rec['userdevice'] = str(userdevice) + vbd_rec['bootable'] = bootable + vbd_rec['mode'] = 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) + vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec) + logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, + vdi_ref) + defer.returnValue(vbd_ref) + + @defer.inlineCallbacks + def _create_vif(self, vm_ref, network_ref, mac_address): + """Create a VIF record. Returns a Deferred that gives the new + VIF reference.""" + + vif_rec = {} + vif_rec['device'] = '0' + vif_rec['network'] = network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = mac_address + vif_rec['MTU'] = '1500' + vif_rec['other_config'] = {} + vif_rec['qos_algorithm_type'] = '' + vif_rec['qos_algorithm_params'] = {} + logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, + network_ref) + vif_ref = yield self._call_xenapi('VIF.create', vif_rec) + logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, + vm_ref, network_ref) + defer.returnValue(vif_ref) + + @defer.inlineCallbacks + def _find_network_with_bridge(self, bridge): + expr = 'field "bridge" = "%s"' % bridge + networks = yield self._call_xenapi('network.get_all_records_where', + expr) + if len(networks) == 1: + defer.returnValue(networks.keys()[0]) + elif len(networks) > 1: + raise Exception('Found non-unique network for bridge %s' % bridge) + else: + raise Exception('Found no network for bridge %s' % bridge) + + @defer.inlineCallbacks + def _fetch_image(self, image, user, project, use_sr): + """use_sr: True to put the image as a VDI in an SR, False to place + it on dom0's filesystem. The former is for VM disks, the latter for + its kernel and ramdisk (if external kernels are being used). 
+ Returns a Deferred that gives the new VDI UUID.""" + + url = images.image_url(image) + access = AuthManager().get_access_key(user, project) + logging.debug("Asking xapi to fetch %s as %s" % (url, access)) + fn = use_sr and 'get_vdi' or 'get_kernel' + args = {} + args['src_url'] = url + args['username'] = access + args['password'] = user.secret + if use_sr: + args['add_partition'] = 'true' + task = yield self._async_call_plugin('objectstore', fn, args) + uuid = yield self._wait_for_task(task) + defer.returnValue(uuid) + + @defer.inlineCallbacks + def reboot(self, instance): + vm = yield self._lookup(instance.name) + if vm is None: + raise Exception('instance not present %s' % instance.name) + task = yield self._call_xenapi('Async.VM.clean_reboot', vm) + yield self._wait_for_task(task) + + @defer.inlineCallbacks + def destroy(self, instance): + vm = yield self._lookup(instance.name) + if vm is None: + # Don't complain, just return. This lets us clean up instances + # that have already disappeared from the underlying platform. + defer.returnValue(None) + # Get the VDIs related to the VM + vdis = yield self._lookup_vm_vdis(vm) + try: + task = yield self._call_xenapi('Async.VM.hard_shutdown', vm) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + # Disk clean-up + if vdis: + for vdi in vdis: + try: + task = yield self._call_xenapi('Async.VDI.destroy', vdi) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + try: + task = yield self._call_xenapi('Async.VM.destroy', vm) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + + def get_info(self, instance_id): + vm = self._lookup_blocking(instance_id) + if vm is None: + raise Exception('instance not present %s' % instance_id) + rec = self._session.xenapi.VM.get_record(vm) + return {'state': XENAPI_POWER_STATE[rec['power_state']], + 'max_mem': long(rec['memory_static_max']) >> 10, + 'mem': long(rec['memory_dynamic_max']) >> 10, + 'num_cpu': rec['VCPUs_max'], + 'cpu_time': 0} + + def get_console_output(self, instance): + return 'FAKE CONSOLE OUTPUT' + + @utils.deferredToThread + def _lookup(self, i): + return self._lookup_blocking(i) + + def _lookup_blocking(self, i): + vms = self._session.xenapi.VM.get_by_name_label(i) + n = len(vms) + if n == 0: + return None + elif n > 1: + raise Exception('duplicate name found: %s' % i) + else: + return vms[0] + + @utils.deferredToThread + def _lookup_vm_vdis(self, vm): + return self._lookup_vm_vdis_blocking(vm) + + def _lookup_vm_vdis_blocking(self, vm): + # Firstly we get the VBDs, then the VDIs. + # TODO: do we leave the read-only devices? + vbds = self._session.xenapi.VM.get_VBDs(vm) + vdis = [] + if vbds: + for vbd in vbds: + try: + vdi = self._session.xenapi.VBD.get_VDI(vbd) + # Test valid VDI + record = self._session.xenapi.VDI.get_record(vdi) + except Exception, exc: + logging.warn(exc) + else: + vdis.append(vdi) + if len(vdis) > 0: + return vdis + else: + return None + + def _wait_for_task(self, task): + """Return a Deferred that will give the result of the given task. 
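A worked example of the memory bookkeeping in _create_vm and get_info above (the memory_mb value is assumed for illustration): the VM record stores memory limits as byte counts rendered as strings, and get_info later shifts the byte count right by ten bits to report KiB.

memory_mb = 2048                          # hypothetical instance_type['memory_mb']
mem = str(long(memory_mb) * 1024 * 1024)  # '2147483648', as stored in the VM record
print(long(mem) >> 10)                    # 2097152 KiB, as reported by get_info()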
+ The task is polled until it completes.""" + d = defer.Deferred() + reactor.callLater(0, self._poll_task, task, d) + return d + + @utils.deferredToThread + def _poll_task(self, task, deferred): + """Poll the given XenAPI task, and fire the given Deferred if we + get a result.""" + try: + #logging.debug('Polling task %s...', task) + status = self._session.xenapi.task.get_status(task) + if status == 'pending': + reactor.callLater(FLAGS.xenapi_task_poll_interval, + self._poll_task, task, deferred) + elif status == 'success': + result = self._session.xenapi.task.get_result(task) + logging.info('Task %s status: success. %s', task, result) + deferred.callback(_parse_xmlrpc_value(result)) + else: + error_info = self._session.xenapi.task.get_error_info(task) + logging.warn('Task %s status: %s. %s', task, status, + error_info) + deferred.errback(XenAPI.Failure(error_info)) + #logging.debug('Polling task %s done.', task) + except Exception, exc: + logging.warn(exc) + deferred.errback(exc) + + @utils.deferredToThread + def _call_xenapi(self, method, *args): + """Call the specified XenAPI method on a background thread. Returns + a Deferred for the result.""" + f = self._session.xenapi + for m in method.split('.'): + f = f.__getattr__(m) + return f(*args) + + @utils.deferredToThread + def _async_call_plugin(self, plugin, fn, args): + """Call Async.host.call_plugin on a background thread. Returns a + Deferred with the task reference.""" + return _unwrap_plugin_exceptions( + self._session.xenapi.Async.host.call_plugin, + self._get_xenapi_host(), plugin, fn, args) + + def _get_xenapi_host(self): + return self._session.xenapi.session.get_this_host(self._session.handle) + + +def _unwrap_plugin_exceptions(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, exc: + logging.debug("Got exception: %s", exc) + if (len(exc.details) == 4 and + exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and + exc.details[2] == 'Failure'): + params = None + try: + params = eval(exc.details[3]) + except: + raise exc + raise XenAPI.Failure(params) + else: + raise + except xmlrpclib.ProtocolError, exc: + logging.debug("Got exception: %s", exc) + raise + + +def _parse_xmlrpc_value(val): + """Parse the given value as if it were an XML-RPC value. 
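Illustrative only: the shape of the failure details that _unwrap_plugin_exceptions above looks for when a dom0 plugin raises an error (the payload values are invented). The four-element list carries the marker, the plugin function, the word 'Failure' and a repr of the real parameters, which the wrapper recovers with eval:

details = ['XENAPI_PLUGIN_EXCEPTION', 'get_vdi', 'Failure',
           "['INTERNAL_ERROR', 'download failed']"]   # assumed payload
if (len(details) == 4 and
        details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
        details[2] == 'Failure'):
    params = eval(details[3])   # ['INTERNAL_ERROR', 'download failed']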
This is + sometimes used as the format for the task.result field.""" + if not val: + return val + x = xmlrpclib.loads( + '' + + val + + '') + return x[0][0] From 541f8ce212a33d14ac5ba48b3dde6c43a60bc368 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Sat, 27 Nov 2010 13:33:38 +0000 Subject: [PATCH 23/83] typos and pep8 fixes --- nova/virt/xenapi.py | 35 +++++++++++++++++++-------------- nova/virt/xenapi/power_state.py | 1 - 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 93c11920567d..a17894c84946 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -55,8 +55,8 @@ from twisted.internet import reactor from twisted.internet import task from xenapi import power_state -from xenapi import vmops -from xenapi import volumeops +from xenapi import VMOps +from xenapi import VolumeOps XenAPI = None @@ -102,30 +102,32 @@ def get_connection(_): class XenAPIConnection(object): def __init__(self, url, user, pw): - self._session = XenAPISession(url, user, pw) - self._vmops = VMOps(sef._session) - self._volumeops = volumeOps(self._session) - + session = XenAPISession(url, user, pw) + self._vmops = VMOps(session) + self._volumeops = VolumeOps(session) + def list_instances(self): return self._vmops.list_instances() - + def spawn(self, instance): self._vmops.spawn(instance) - + def reboot(self, instance): - self._vmops.reboot(instance) + self._vmops.reboot(instance) def destroy(self, instance): self._vmops.destroy(instance) - + def get_info(self, instance_id): return self._vmops.get_info(instance_id) - + def get_console_output(self, instance): - return self._vmops.get_console_output(instance) - + return self._vmops.get_console_output(instance) + def attach_volume(self, instance_name, device_path, mountpoint): - return self._volumeops.attach_volume(instance_name, device_path, mountpoint) + return self._volumeops.attach_volume(instance_name, + device_path, + mountpoint) def detach_volume(self, instance_name, mountpoint): return self._volumeops.detach_volume(instance_name, mountpoint) @@ -136,6 +138,9 @@ class XenAPISession(object): self._session = XenAPI.Session(url) self._session.login_with_password(user, pw) + def get_session(self): + return self._session + @utils.deferredToThread def call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. 
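A standalone sketch related to _parse_xmlrpc_value shown earlier, assuming the task result is a bare XML-RPC value fragment: xmlrpclib can only decode such a fragment once it is wrapped in a complete methodResponse envelope.

import xmlrpclib

fragment = '<value>OpaqueRef:some-vdi</value>'   # assumed task.result payload
wrapped = ('<?xml version="1.0"?><methodResponse><params><param>' +
           fragment +
           '</param></params></methodResponse>')
params, _method = xmlrpclib.loads(wrapped)
print(params[0])   # 'OpaqueRef:some-vdi'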
Returns @@ -162,7 +167,7 @@ class XenAPISession(object): d = defer.Deferred() reactor.callLater(0, self._poll_task, task, d) return d - + @utils.deferredToThread def _poll_task(self, task, deferred): """Poll the given XenAPI task, and fire the given Deferred if we diff --git a/nova/virt/xenapi/power_state.py b/nova/virt/xenapi/power_state.py index d2d8fba42518..5892f0f485c7 100644 --- a/nova/virt/xenapi/power_state.py +++ b/nova/virt/xenapi/power_state.py @@ -23,4 +23,3 @@ XENAPI_POWER_STATE = { 'Paused': power_state.PAUSED, 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} - \ No newline at end of file From b6bed02342ac716b3cb3847fb54b5f285995f3b7 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Sun, 28 Nov 2010 01:49:28 +0000 Subject: [PATCH 24/83] further refactoring --- nova/virt/xenapi.py | 15 +- nova/virt/xenapi/network_utils.py | 52 ++++ nova/virt/xenapi/vm_utils.py | 206 ++++++++++++++ nova/virt/xenapi/vmops.py | 126 +++++++++ nova/virt/xenapi/xenapi.py | 439 ------------------------------ 5 files changed, 391 insertions(+), 447 deletions(-) create mode 100644 nova/virt/xenapi/network_utils.py create mode 100644 nova/virt/xenapi/vm_utils.py create mode 100644 nova/virt/xenapi/vmops.py delete mode 100644 nova/virt/xenapi/xenapi.py diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index a17894c84946..2f2cef75ef91 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -54,7 +54,6 @@ from twisted.internet import defer from twisted.internet import reactor from twisted.internet import task -from xenapi import power_state from xenapi import VMOps from xenapi import VolumeOps @@ -138,8 +137,11 @@ class XenAPISession(object): self._session = XenAPI.Session(url) self._session.login_with_password(user, pw) - def get_session(self): - return self._session + def get_xenapi(self): + return self._session.xenapi + + def get_xenapi_host(self): + return self._session.xenapi.session.get_this_host(self._session.handle) @utils.deferredToThread def call_xenapi(self, method, *args): @@ -149,17 +151,14 @@ class XenAPISession(object): for m in method.split('.'): f = f.__getattr__(m) return f(*args) - + @utils.deferredToThread def async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread. Returns a Deferred with the task reference.""" return _unwrap_plugin_exceptions( self._session.xenapi.Async.host.call_plugin, - self._get_xenapi_host(), plugin, fn, args) - - def get_xenapi_host(self): - return self._session.xenapi.session.get_this_host(self._session.handle) + self.get_xenapi_host(), plugin, fn, args) def wait_for_task(self, task): """Return a Deferred that will give the result of the given task. diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py new file mode 100644 index 000000000000..e062f916f037 --- /dev/null +++ b/nova/virt/xenapi/network_utils.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
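A sketch of the layering that the xenapi.py hunks above put in place (illustrative; conn and instance are assumed to exist): XenAPIConnection owns a single XenAPISession, hands it to VMOps and VolumeOps in __init__, and every driver entry point is a one-line delegation.

def example_delegation(conn, instance):
    # conn is assumed to be an initialized XenAPIConnection.
    conn.spawn(instance)                   # forwarded to VMOps.spawn()
    conn.reboot(instance)                  # forwarded to VMOps.reboot()
    # Device path and mountpoint below are assumed values.
    return conn.attach_volume(instance.name, '/dev/sdb', '/dev/sdc')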
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper methods for operations related to the management of network records and +their attributes like bridges, PIFs, QoS, as well as their lookup functions. +""" + +import logging +import xmlrpclib + +from twisted.internet import defer +from twisted.internet import reactor +from twisted.internet import task + +from nova import db +from nova import flags +from nova import process +from nova import utils +from nova.auth.manager import AuthManager # wrap this one +from nova.compute import instance_types # wrap this one +from nova.virt import images # wrap this one + +import power_state + + +class NetworkHelper(): + @classmethod + @defer.inlineCallbacks + def find_network_with_bridge(self, session, bridge): + expr = 'field "bridge" = "%s"' % bridge + networks = yield session.call_xenapi('network.get_all_records_where', + expr) + if len(networks) == 1: + defer.returnValue(networks.keys()[0]) + elif len(networks) > 1: + raise Exception('Found non-unique network for bridge %s' % bridge) + else: + raise Exception('Found no network for bridge %s' % bridge) \ No newline at end of file diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py new file mode 100644 index 000000000000..6fb409b26e7e --- /dev/null +++ b/nova/virt/xenapi/vm_utils.py @@ -0,0 +1,206 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper methods for operations related to the management of VM records and +their attributes like VDIs, VIFs, as well as their lookup functions. +""" + +import logging +import xmlrpclib + +from twisted.internet import defer +from twisted.internet import reactor +from twisted.internet import task + +from nova import db +from nova import flags +from nova import process +from nova import utils +from nova.auth.manager import AuthManager # wrap this one +from nova.compute import instance_types # wrap this one +from nova.virt import images # wrap this one + +import power_state + + +class VMHelper(): + @classmethod + @defer.inlineCallbacks + def create_vm(self, session, instance, kernel, ramdisk): + """Create a VM record. 
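A hypothetical caller of the NetworkHelper.find_network_with_bridge classmethod defined above (the bridge name and import path are assumptions): it resolves a nova bridge name to a XenServer network reference and raises if the match is missing or not unique.

from twisted.internet import defer

from network_utils import NetworkHelper   # assumed import path


@defer.inlineCallbacks
def example_find_network(session):
    # session is assumed to be an initialized XenAPISession.
    net_ref = yield NetworkHelper.find_network_with_bridge(session, 'xenbr0')
    defer.returnValue(net_ref)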
Returns a Deferred that gives the new + VM reference.""" + + instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] + mem = str(long(instance_type['memory_mb']) * 1024 * 1024) + vcpus = str(instance_type['vcpus']) + rec = { + 'name_label': instance.name, + 'name_description': '', + 'is_a_template': False, + 'memory_static_min': '0', + 'memory_static_max': mem, + 'memory_dynamic_min': mem, + 'memory_dynamic_max': mem, + 'VCPUs_at_startup': vcpus, + 'VCPUs_max': vcpus, + 'VCPUs_params': {}, + 'actions_after_shutdown': 'destroy', + 'actions_after_reboot': 'restart', + 'actions_after_crash': 'destroy', + 'PV_bootloader': '', + 'PV_kernel': kernel, + 'PV_ramdisk': ramdisk, + 'PV_args': 'root=/dev/xvda1', + 'PV_bootloader_args': '', + 'PV_legacy_args': '', + 'HVM_boot_policy': '', + 'HVM_boot_params': {}, + 'platform': {}, + 'PCI_bus': '', + 'recommendations': '', + 'affinity': '', + 'user_version': '0', + 'other_config': {}, + } + logging.debug('Created VM %s...', instance.name) + vm_ref = yield session.call_xenapi('VM.create', rec) + logging.debug('Created VM %s as %s.', instance.name, vm_ref) + defer.returnValue(vm_ref) + + @classmethod + @defer.inlineCallbacks + def create_vbd(self, session, vm_ref, vdi_ref, userdevice, bootable): + """Create a VBD record. Returns a Deferred that gives the new + VBD reference.""" + + vbd_rec = {} + vbd_rec['VM'] = vm_ref + vbd_rec['VDI'] = vdi_ref + vbd_rec['userdevice'] = str(userdevice) + vbd_rec['bootable'] = bootable + vbd_rec['mode'] = 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) + vbd_ref = yield session.call_xenapi('VBD.create', vbd_rec) + logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, + vdi_ref) + defer.returnValue(vbd_ref) + + @classmethod + @defer.inlineCallbacks + def create_vif(self, session, vm_ref, network_ref, mac_address): + """Create a VIF record. Returns a Deferred that gives the new + VIF reference.""" + + vif_rec = {} + vif_rec['device'] = '0' + vif_rec['network'] = network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = mac_address + vif_rec['MTU'] = '1500' + vif_rec['other_config'] = {} + vif_rec['qos_algorithm_type'] = '' + vif_rec['qos_algorithm_params'] = {} + logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, + network_ref) + vif_ref = yield session.call_xenapi('VIF.create', vif_rec) + logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, + vm_ref, network_ref) + defer.returnValue(vif_ref) + + @classmethod + @defer.inlineCallbacks + def find_network_with_bridge(self, session, bridge): + expr = 'field "bridge" = "%s"' % bridge + networks = yield session.call_xenapi('network.get_all_records_where', + expr) + if len(networks) == 1: + defer.returnValue(networks.keys()[0]) + elif len(networks) > 1: + raise Exception('Found non-unique network for bridge %s' % bridge) + else: + raise Exception('Found no network for bridge %s' % bridge) + + @classmethod + @defer.inlineCallbacks + def fetch_image(self, session, image, user, project, use_sr): + """use_sr: True to put the image as a VDI in an SR, False to place + it on dom0's filesystem. The former is for VM disks, the latter for + its kernel and ramdisk (if external kernels are being used). 
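A hypothetical follow-on use of the create_vbd helper above: attaching a second, non-bootable data disk to an existing VM (the device number, references and import path are assumptions).

from twisted.internet import defer

from vm_utils import VMHelper   # assumed import path


@defer.inlineCallbacks
def example_attach_data_disk(session, vm_ref, data_vdi_ref):
    # userdevice=1 picks the second virtual device; bootable=False keeps the
    # root disk on userdevice 0 as the boot device.
    vbd_ref = yield VMHelper.create_vbd(session, vm_ref, data_vdi_ref, 1, False)
    defer.returnValue(vbd_ref)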
+ Returns a Deferred that gives the new VDI UUID.""" + + url = images.image_url(image) + access = AuthManager().get_access_key(user, project) + logging.debug("Asking xapi to fetch %s as %s" % (url, access)) + fn = use_sr and 'get_vdi' or 'get_kernel' + args = {} + args['src_url'] = url + args['username'] = access + args['password'] = user.secret + if use_sr: + args['add_partition'] = 'true' + task = yield session.async_call_plugin('objectstore', fn, args) + uuid = yield session.wait_for_task(task) + defer.returnValue(uuid) + + @classmethod + @utils.deferredToThread + def lookup(self, session, i): + return VMHelper.lookup_blocking(i) + + @classmethod + def lookup_blocking(self, session, i): + vms = session.get_xenapi().VM.get_by_name_label(i) + n = len(vms) + if n == 0: + return None + elif n > 1: + raise Exception('duplicate name found: %s' % i) + else: + return vms[0] + + @classmethod + @utils.deferredToThread + def lookup_vm_vdis(self, session, vm): + return VMHelper.lookup_vm_vdis_blocking(session, vm) + + @classmethod + def lookup_vm_vdis_blocking(self, session, vm): + # Firstly we get the VBDs, then the VDIs. + # TODO: do we leave the read-only devices? + vbds = session.get_xenapi().VM.get_VBDs(vm) + vdis = [] + if vbds: + for vbd in vbds: + try: + vdi = session.get_xenapi().VBD.get_VDI(vbd) + # Test valid VDI + record = session.get_xenapi().VDI.get_record(vdi) + except Exception, exc: + logging.warn(exc) + else: + vdis.append(vdi) + if len(vdis) > 0: + return vdis + else: + return None \ No newline at end of file diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py new file mode 100644 index 000000000000..03b7fc614fbb --- /dev/null +++ b/nova/virt/xenapi/vmops.py @@ -0,0 +1,126 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Management class for VM-related functions (spawn, reboot, etc). 
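A minimal sketch of how the use_sr switch on fetch_image above is intended to be used (import path and Deferred plumbing assumed): the machine image becomes a VDI in an SR, while the kernel is fetched onto dom0's filesystem.

from twisted.internet import defer

from vm_utils import VMHelper   # assumed import path


@defer.inlineCallbacks
def example_fetch(session, instance, user, project):
    vdi_uuid = yield VMHelper.fetch_image(
        session, instance.image_id, user, project, True)     # disk -> SR
    kernel = yield VMHelper.fetch_image(
        session, instance.kernel_id, user, project, False)   # file in dom0
    defer.returnValue((vdi_uuid, kernel))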
+""" + +import logging +import xmlrpclib + +from twisted.internet import defer +from twisted.internet import reactor +from twisted.internet import task + +from nova import db +from nova import flags +from nova import process +from nova import utils +from nova.auth.manager import AuthManager # wrap this one +from nova.compute import instance_types # wrap this one +from nova.virt import images # wrap this one + +import power_state +import VMHelper +import NetworkHelper + + +class VMOps(object): + def __init__(self, session): + self._session = session + + def list_instances(self): + return [self._session.get_xenapi().VM.get_name_label(vm) \ + for vm in self._session.get_xenapi().VM.get_all()] + + @defer.inlineCallbacks + def spawn(self, instance): + vm = yield VMHelper.lookup(self._session, instance.name) + if vm is not None: + raise Exception('Attempted to create non-unique name %s' % + instance.name) + + network = db.project_get_network(None, instance.project_id) + network_ref = \ + yield NetworkHelper.find_network_with_bridge(self._session, network.bridge) + + user = AuthManager().get_user(instance.user_id) + project = AuthManager().get_project(instance.project_id) + vdi_uuid = yield VMHelper.fetch_image(self._session, + instance.image_id, user, project, True) + kernel = yield VMHelper.fetch_image(self._session, + instance.kernel_id, user, project, False) + ramdisk = yield VMHelper.fetch_image(self._session, + instance.ramdisk_id, user, project, False) + vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + + vm_ref = yield VMHelper.create_vm(self._session, instance, kernel, ramdisk) + yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) + if network_ref: + yield VMHelper.create_vif(self._session, vm_ref, network_ref, instance.mac_address) + logging.debug('Starting VM %s...', vm_ref) + yield self._session.call_xenapi('VM.start', vm_ref, False, False) + logging.info('Spawning VM %s created %s.', instance.name, vm_ref) + + @defer.inlineCallbacks + def reboot(self, instance): + vm = yield VMHelper.lookup(self._session, instance.name) + if vm is None: + raise Exception('instance not present %s' % instance.name) + task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm) + yield self._session.wait_for_task(task) + + @defer.inlineCallbacks + def destroy(self, instance): + vm = yield VMHelper.lookup(self._session, instance.name) + if vm is None: + # Don't complain, just return. This lets us clean up instances + # that have already disappeared from the underlying platform. 
+ defer.returnValue(None) + # Get the VDIs related to the VM + vdis = yield VMHelper.lookup_vm_vdis(self._session, vm) + try: + task = yield self._session.call_xenapi('Async.VM.hard_shutdown', vm) + yield self._session.wait_for_task(task) + except Exception, exc: + logging.warn(exc) + # Disk clean-up + if vdis: + for vdi in vdis: + try: + task = yield self._session.call_xenapi('Async.VDI.destroy', vdi) + yield self._session.wait_for_task(task) + except Exception, exc: + logging.warn(exc) + try: + task = yield self._session.call_xenapi('Async.VM.destroy', vm) + yield self._session.wait_for_task(task) + except Exception, exc: + logging.warn(exc) + + def get_info(self, instance_id): + vm = VMHelper.lookup_blocking(self._session, instance_id) + if vm is None: + raise Exception('instance not present %s' % instance_id) + rec = self._session.get_xenapi().VM.get_record(vm) + return {'state': XENAPI_POWER_STATE[rec['power_state']], + 'max_mem': long(rec['memory_static_max']) >> 10, + 'mem': long(rec['memory_dynamic_max']) >> 10, + 'num_cpu': rec['VCPUs_max'], + 'cpu_time': 0} + + def get_console_output(self, instance): + return 'FAKE CONSOLE OUTPUT' \ No newline at end of file diff --git a/nova/virt/xenapi/xenapi.py b/nova/virt/xenapi/xenapi.py deleted file mode 100644 index ddbef4303fac..000000000000 --- a/nova/virt/xenapi/xenapi.py +++ /dev/null @@ -1,439 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A connection to XenServer or Xen Cloud Platform. - -The concurrency model for this class is as follows: - -All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator -deferredToThread). They are remote calls, and so may hang for the usual -reasons. They should not be allowed to block the reactor thread. - -All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async -(using XenAPI.VM.async_start etc). These return a task, which can then be -polled for completion. Polling is handled using reactor.callLater. - -This combination of techniques means that we don't block the reactor thread at -all, and at the same time we don't hold lots of threads waiting for -long-running operations. - -FIXME: get_info currently doesn't conform to these rules, and will block the -reactor thread if the VM.get_by_name_label or VM.get_record calls block. - -**Related Flags** - -:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform. -:xenapi_connection_username: Username for connection to XenServer/Xen Cloud - Platform (default: root). -:xenapi_connection_password: Password for connection to XenServer/Xen Cloud - Platform. -:xenapi_task_poll_interval: The interval (seconds) used for polling of - remote tasks (Async.VM.start, etc) - (default: 0.5). 
- -""" - -import logging -import xmlrpclib - -from twisted.internet import defer -from twisted.internet import reactor -from twisted.internet import task - -from nova import db -from nova import flags -from nova import process -from nova import utils -from nova.auth.manager import AuthManager # wrap this one -from nova.compute import instance_types # wrap this one -from xenapi import power_state -from nova.virt import images # wrap this one - -XenAPI = None - - -FLAGS = flags.FLAGS -flags.DEFINE_string('xenapi_connection_url', - None, - 'URL for connection to XenServer/Xen Cloud Platform.' - ' Required if connection_type=xenapi.') -flags.DEFINE_string('xenapi_connection_username', - 'root', - 'Username for connection to XenServer/Xen Cloud Platform.' - ' Used only if connection_type=xenapi.') -flags.DEFINE_string('xenapi_connection_password', - None, - 'Password for connection to XenServer/Xen Cloud Platform.' - ' Used only if connection_type=xenapi.') -flags.DEFINE_float('xenapi_task_poll_interval', - 0.5, - 'The interval used for polling of remote tasks ' - '(Async.VM.start, etc). Used only if ' - 'connection_type=xenapi.') - - -def get_connection(_): - """Note that XenAPI doesn't have a read-only connection mode, so - the read_only parameter is ignored.""" - # This is loaded late so that there's no need to install this - # library when not using XenAPI. - global XenAPI - if XenAPI is None: - XenAPI = __import__('XenAPI') - url = FLAGS.xenapi_connection_url - username = FLAGS.xenapi_connection_username - password = FLAGS.xenapi_connection_password - if not url or password is None: - raise Exception('Must specify xenapi_connection_url, ' - 'xenapi_connection_username (optionally), and ' - 'xenapi_connection_password to use ' - 'connection_type=xenapi') - return XenAPIConnection(url, username, password) - - -class XenAPISession(object): - def __init__(self, url, user, pw): - self._session = XenAPI.Session(url) - self._session.login_with_password(user, pw) - - def session(self): - return self._session - - def list_instances(self): - return [self._session.xenapi.VM.get_name_label(vm) \ - for vm in self._session.xenapi.VM.get_all()] - - @defer.inlineCallbacks - def spawn(self, instance): - vm = yield self._lookup(instance.name) - if vm is not None: - raise Exception('Attempted to create non-unique name %s' % - instance.name) - - network = db.project_get_network(None, instance.project_id) - network_ref = \ - yield self._find_network_with_bridge(network.bridge) - - user = AuthManager().get_user(instance.user_id) - project = AuthManager().get_project(instance.project_id) - vdi_uuid = yield self._fetch_image( - instance.image_id, user, project, True) - kernel = yield self._fetch_image( - instance.kernel_id, user, project, False) - ramdisk = yield self._fetch_image( - instance.ramdisk_id, user, project, False) - vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) - - vm_ref = yield self._create_vm(instance, kernel, ramdisk) - yield self._create_vbd(vm_ref, vdi_ref, 0, True) - if network_ref: - yield self._create_vif(vm_ref, network_ref, instance.mac_address) - logging.debug('Starting VM %s...', vm_ref) - yield self._call_xenapi('VM.start', vm_ref, False, False) - logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - - @defer.inlineCallbacks - def _create_vm(self, instance, kernel, ramdisk): - """Create a VM record. 
Returns a Deferred that gives the new - VM reference.""" - - instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] - mem = str(long(instance_type['memory_mb']) * 1024 * 1024) - vcpus = str(instance_type['vcpus']) - rec = { - 'name_label': instance.name, - 'name_description': '', - 'is_a_template': False, - 'memory_static_min': '0', - 'memory_static_max': mem, - 'memory_dynamic_min': mem, - 'memory_dynamic_max': mem, - 'VCPUs_at_startup': vcpus, - 'VCPUs_max': vcpus, - 'VCPUs_params': {}, - 'actions_after_shutdown': 'destroy', - 'actions_after_reboot': 'restart', - 'actions_after_crash': 'destroy', - 'PV_bootloader': '', - 'PV_kernel': kernel, - 'PV_ramdisk': ramdisk, - 'PV_args': 'root=/dev/xvda1', - 'PV_bootloader_args': '', - 'PV_legacy_args': '', - 'HVM_boot_policy': '', - 'HVM_boot_params': {}, - 'platform': {}, - 'PCI_bus': '', - 'recommendations': '', - 'affinity': '', - 'user_version': '0', - 'other_config': {}, - } - logging.debug('Created VM %s...', instance.name) - vm_ref = yield self._call_xenapi('VM.create', rec) - logging.debug('Created VM %s as %s.', instance.name, vm_ref) - defer.returnValue(vm_ref) - - @defer.inlineCallbacks - def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): - """Create a VBD record. Returns a Deferred that gives the new - VBD reference.""" - - vbd_rec = {} - vbd_rec['VM'] = vm_ref - vbd_rec['VDI'] = vdi_ref - vbd_rec['userdevice'] = str(userdevice) - vbd_rec['bootable'] = bootable - vbd_rec['mode'] = 'RW' - vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False - vbd_rec['other_config'] = {} - vbd_rec['qos_algorithm_type'] = '' - vbd_rec['qos_algorithm_params'] = {} - vbd_rec['qos_supported_algorithms'] = [] - logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec) - logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, - vdi_ref) - defer.returnValue(vbd_ref) - - @defer.inlineCallbacks - def _create_vif(self, vm_ref, network_ref, mac_address): - """Create a VIF record. Returns a Deferred that gives the new - VIF reference.""" - - vif_rec = {} - vif_rec['device'] = '0' - vif_rec['network'] = network_ref - vif_rec['VM'] = vm_ref - vif_rec['MAC'] = mac_address - vif_rec['MTU'] = '1500' - vif_rec['other_config'] = {} - vif_rec['qos_algorithm_type'] = '' - vif_rec['qos_algorithm_params'] = {} - logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, - network_ref) - vif_ref = yield self._call_xenapi('VIF.create', vif_rec) - logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, - vm_ref, network_ref) - defer.returnValue(vif_ref) - - @defer.inlineCallbacks - def _find_network_with_bridge(self, bridge): - expr = 'field "bridge" = "%s"' % bridge - networks = yield self._call_xenapi('network.get_all_records_where', - expr) - if len(networks) == 1: - defer.returnValue(networks.keys()[0]) - elif len(networks) > 1: - raise Exception('Found non-unique network for bridge %s' % bridge) - else: - raise Exception('Found no network for bridge %s' % bridge) - - @defer.inlineCallbacks - def _fetch_image(self, image, user, project, use_sr): - """use_sr: True to put the image as a VDI in an SR, False to place - it on dom0's filesystem. The former is for VM disks, the latter for - its kernel and ramdisk (if external kernels are being used). 
- Returns a Deferred that gives the new VDI UUID.""" - - url = images.image_url(image) - access = AuthManager().get_access_key(user, project) - logging.debug("Asking xapi to fetch %s as %s" % (url, access)) - fn = use_sr and 'get_vdi' or 'get_kernel' - args = {} - args['src_url'] = url - args['username'] = access - args['password'] = user.secret - if use_sr: - args['add_partition'] = 'true' - task = yield self._async_call_plugin('objectstore', fn, args) - uuid = yield self._wait_for_task(task) - defer.returnValue(uuid) - - @defer.inlineCallbacks - def reboot(self, instance): - vm = yield self._lookup(instance.name) - if vm is None: - raise Exception('instance not present %s' % instance.name) - task = yield self._call_xenapi('Async.VM.clean_reboot', vm) - yield self._wait_for_task(task) - - @defer.inlineCallbacks - def destroy(self, instance): - vm = yield self._lookup(instance.name) - if vm is None: - # Don't complain, just return. This lets us clean up instances - # that have already disappeared from the underlying platform. - defer.returnValue(None) - # Get the VDIs related to the VM - vdis = yield self._lookup_vm_vdis(vm) - try: - task = yield self._call_xenapi('Async.VM.hard_shutdown', vm) - yield self._wait_for_task(task) - except Exception, exc: - logging.warn(exc) - # Disk clean-up - if vdis: - for vdi in vdis: - try: - task = yield self._call_xenapi('Async.VDI.destroy', vdi) - yield self._wait_for_task(task) - except Exception, exc: - logging.warn(exc) - try: - task = yield self._call_xenapi('Async.VM.destroy', vm) - yield self._wait_for_task(task) - except Exception, exc: - logging.warn(exc) - - def get_info(self, instance_id): - vm = self._lookup_blocking(instance_id) - if vm is None: - raise Exception('instance not present %s' % instance_id) - rec = self._session.xenapi.VM.get_record(vm) - return {'state': XENAPI_POWER_STATE[rec['power_state']], - 'max_mem': long(rec['memory_static_max']) >> 10, - 'mem': long(rec['memory_dynamic_max']) >> 10, - 'num_cpu': rec['VCPUs_max'], - 'cpu_time': 0} - - def get_console_output(self, instance): - return 'FAKE CONSOLE OUTPUT' - - @utils.deferredToThread - def _lookup(self, i): - return self._lookup_blocking(i) - - def _lookup_blocking(self, i): - vms = self._session.xenapi.VM.get_by_name_label(i) - n = len(vms) - if n == 0: - return None - elif n > 1: - raise Exception('duplicate name found: %s' % i) - else: - return vms[0] - - @utils.deferredToThread - def _lookup_vm_vdis(self, vm): - return self._lookup_vm_vdis_blocking(vm) - - def _lookup_vm_vdis_blocking(self, vm): - # Firstly we get the VBDs, then the VDIs. - # TODO: do we leave the read-only devices? - vbds = self._session.xenapi.VM.get_VBDs(vm) - vdis = [] - if vbds: - for vbd in vbds: - try: - vdi = self._session.xenapi.VBD.get_VDI(vbd) - # Test valid VDI - record = self._session.xenapi.VDI.get_record(vdi) - except Exception, exc: - logging.warn(exc) - else: - vdis.append(vdi) - if len(vdis) > 0: - return vdis - else: - return None - - def _wait_for_task(self, task): - """Return a Deferred that will give the result of the given task. 
- The task is polled until it completes.""" - d = defer.Deferred() - reactor.callLater(0, self._poll_task, task, d) - return d - - @utils.deferredToThread - def _poll_task(self, task, deferred): - """Poll the given XenAPI task, and fire the given Deferred if we - get a result.""" - try: - #logging.debug('Polling task %s...', task) - status = self._session.xenapi.task.get_status(task) - if status == 'pending': - reactor.callLater(FLAGS.xenapi_task_poll_interval, - self._poll_task, task, deferred) - elif status == 'success': - result = self._session.xenapi.task.get_result(task) - logging.info('Task %s status: success. %s', task, result) - deferred.callback(_parse_xmlrpc_value(result)) - else: - error_info = self._session.xenapi.task.get_error_info(task) - logging.warn('Task %s status: %s. %s', task, status, - error_info) - deferred.errback(XenAPI.Failure(error_info)) - #logging.debug('Polling task %s done.', task) - except Exception, exc: - logging.warn(exc) - deferred.errback(exc) - - @utils.deferredToThread - def _call_xenapi(self, method, *args): - """Call the specified XenAPI method on a background thread. Returns - a Deferred for the result.""" - f = self._session.xenapi - for m in method.split('.'): - f = f.__getattr__(m) - return f(*args) - - @utils.deferredToThread - def _async_call_plugin(self, plugin, fn, args): - """Call Async.host.call_plugin on a background thread. Returns a - Deferred with the task reference.""" - return _unwrap_plugin_exceptions( - self._session.xenapi.Async.host.call_plugin, - self._get_xenapi_host(), plugin, fn, args) - - def _get_xenapi_host(self): - return self._session.xenapi.session.get_this_host(self._session.handle) - - -def _unwrap_plugin_exceptions(func, *args, **kwargs): - try: - return func(*args, **kwargs) - except XenAPI.Failure, exc: - logging.debug("Got exception: %s", exc) - if (len(exc.details) == 4 and - exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and - exc.details[2] == 'Failure'): - params = None - try: - params = eval(exc.details[3]) - except: - raise exc - raise XenAPI.Failure(params) - else: - raise - except xmlrpclib.ProtocolError, exc: - logging.debug("Got exception: %s", exc) - raise - - -def _parse_xmlrpc_value(val): - """Parse the given value as if it were an XML-RPC value. This is - sometimes used as the format for the task.result field.""" - if not val: - return val - x = xmlrpclib.loads( - '' + - val + - '') - return x[0][0] From c10a6f3e97a5871ac0cdce97bde89b3cee59d336 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Sun, 28 Nov 2010 15:12:37 +0000 Subject: [PATCH 25/83] other round of refactoring --- nova/virt/xenapi/novadeps.py | 97 +++++++++++++++++++ nova/virt/xenapi/vm_utils.py | 32 ++---- nova/virt/xenapi/vmops.py | 41 ++++---- .../xenapi/{power_state.py => volumeops.py} | 22 +++-- 4 files changed, 141 insertions(+), 51 deletions(-) create mode 100644 nova/virt/xenapi/novadeps.py rename nova/virt/xenapi/{power_state.py => volumeops.py} (64%) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py new file mode 100644 index 000000000000..a4e51226308c --- /dev/null +++ b/nova/virt/xenapi/novadeps.py @@ -0,0 +1,97 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import db +from nova import flags +from nova import process +from nova import utils + +from nova.compute import power_state +from nova.auth.manager import AuthManager +from nova.compute import instance_types +from nova.virt import images + +XENAPI_POWER_STATE = { + 'Halted': power_state.SHUTDOWN, + 'Running': power_state.RUNNING, + 'Paused': power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed': power_state.CRASHED} + +class Instance(object): + + @classmethod + def get_name(self, instance): + return instance.name + + @classmethod + def get_type(self, instance): + return instance_types.INSTANCE_TYPES[instance.instance_type] + + @classmethod + def get_project(self, instance): + return AuthManager().get_project(instance.project_id) + + @classmethod + def get_project_id(self, instance): + return instance.project_id + + @classmethod + def get_image_id(self, instance): + return instance.image_id + + @classmethod + def get_kernel_id(self, instance): + return instance.kernel_id + + @classmethod + def get_ramdisk_id(self, instance): + return instance.ramdisk_id + + @classmethod + def get_network(self, instance): + return db.project_get_network(None, instance.project_id) + + @classmethod + def get_mac(self, instance): + return instance.mac_address + + @classmethod + def get_user(self, instance): + return AuthManager().get_user(instance.user_id) + + +class Network(object): + + @classmethod + def get_bridge(self, network): + return network.bridge + +class Image(object): + + @classmethod + def get_url(self, image): + return images.image_url(image) + +class User(object): + + @classmethod + def get_access(self, user, project): + return AuthManager().get_access_key(user, project) + + @classmethod + def get_secret(self, user): + return user.secret \ No newline at end of file diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 6fb409b26e7e..8329f0d7e25f 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -30,11 +30,10 @@ from nova import db from nova import flags from nova import process from nova import utils -from nova.auth.manager import AuthManager # wrap this one -from nova.compute import instance_types # wrap this one -from nova.virt import images # wrap this one -import power_state +from novadeps import Instance +from novadeps import Image +from novadeps import User class VMHelper(): @@ -44,7 +43,7 @@ class VMHelper(): """Create a VM record. 
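A hypothetical caller of the novadeps wrappers introduced above (the instance argument is assumed to be a nova instance record): the accessors keep vm_utils and vmops free of direct imports of the nova-side modules (db, AuthManager, instance_types, images).

from novadeps import Instance, Network


def example_accessors(instance):
    instance_type = Instance.get_type(instance)   # INSTANCE_TYPES entry
    bridge = Network.get_bridge(Instance.get_network(instance))
    user = Instance.get_user(instance)
    return instance_type, bridge, user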
Returns a Deferred that gives the new VM reference.""" - instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] + instance_type = Instance.get_type(instance) mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) rec = { @@ -76,9 +75,9 @@ class VMHelper(): 'user_version': '0', 'other_config': {}, } - logging.debug('Created VM %s...', instance.name) + logging.debug('Created VM %s...', Instance.get_name(instance)) vm_ref = yield session.call_xenapi('VM.create', rec) - logging.debug('Created VM %s as %s.', instance.name, vm_ref) + logging.debug('Created VM %s as %s.', Instance.get_name(instance), vm_ref) defer.returnValue(vm_ref) @classmethod @@ -128,19 +127,6 @@ class VMHelper(): vm_ref, network_ref) defer.returnValue(vif_ref) - @classmethod - @defer.inlineCallbacks - def find_network_with_bridge(self, session, bridge): - expr = 'field "bridge" = "%s"' % bridge - networks = yield session.call_xenapi('network.get_all_records_where', - expr) - if len(networks) == 1: - defer.returnValue(networks.keys()[0]) - elif len(networks) > 1: - raise Exception('Found non-unique network for bridge %s' % bridge) - else: - raise Exception('Found no network for bridge %s' % bridge) - @classmethod @defer.inlineCallbacks def fetch_image(self, session, image, user, project, use_sr): @@ -149,14 +135,14 @@ class VMHelper(): its kernel and ramdisk (if external kernels are being used). Returns a Deferred that gives the new VDI UUID.""" - url = images.image_url(image) - access = AuthManager().get_access_key(user, project) + url = Image.get_url(image) + access = User.get_access(user, project) logging.debug("Asking xapi to fetch %s as %s" % (url, access)) fn = use_sr and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url args['username'] = access - args['password'] = user.secret + args['password'] = User.get_secret(user) if use_sr: args['add_partition'] = 'true' task = yield session.async_call_plugin('objectstore', fn, args) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 03b7fc614fbb..abb42250202a 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -25,18 +25,14 @@ from twisted.internet import defer from twisted.internet import reactor from twisted.internet import task -from nova import db -from nova import flags -from nova import process -from nova import utils -from nova.auth.manager import AuthManager # wrap this one -from nova.compute import instance_types # wrap this one -from nova.virt import images # wrap this one -import power_state import VMHelper import NetworkHelper +from novadeps import XENAPI_POWER_STATE +from novadeps import Auth +from novadeps import Instance +from novadeps import Network class VMOps(object): def __init__(self, session): @@ -48,44 +44,45 @@ class VMOps(object): @defer.inlineCallbacks def spawn(self, instance): - vm = yield VMHelper.lookup(self._session, instance.name) + vm = yield VMHelper.lookup(self._session, Instance.get_name(instance)) if vm is not None: raise Exception('Attempted to create non-unique name %s' % - instance.name) + Instance.get_name(instance)) - network = db.project_get_network(None, instance.project_id) + network = Instance.get_network(instance) network_ref = \ - yield NetworkHelper.find_network_with_bridge(self._session, network.bridge) + yield NetworkHelper.find_network_with_bridge(self._session, Network.get_bridge(network)) - user = AuthManager().get_user(instance.user_id) - project = AuthManager().get_project(instance.project_id) + user = 
Instance.get_user(instance) + project = Instance.get_project(instance) vdi_uuid = yield VMHelper.fetch_image(self._session, - instance.image_id, user, project, True) + Instance.get_image_id(instance), user, project, True) kernel = yield VMHelper.fetch_image(self._session, - instance.kernel_id, user, project, False) + Instance.get_kernel_id(instance), user, project, False) ramdisk = yield VMHelper.fetch_image(self._session, - instance.ramdisk_id, user, project, False) + Instance.get_ramdisk_id(instance), user, project, False) vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) vm_ref = yield VMHelper.create_vm(self._session, instance, kernel, ramdisk) yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) if network_ref: - yield VMHelper.create_vif(self._session, vm_ref, network_ref, instance.mac_address) + yield VMHelper.create_vif(self._session, vm_ref, network_ref, Instance.get_mac(instance)) logging.debug('Starting VM %s...', vm_ref) yield self._session.call_xenapi('VM.start', vm_ref, False, False) - logging.info('Spawning VM %s created %s.', instance.name, vm_ref) + logging.info('Spawning VM %s created %s.', Instance.get_name(instance), vm_ref) @defer.inlineCallbacks def reboot(self, instance): - vm = yield VMHelper.lookup(self._session, instance.name) + instance_name = Instance.get_name(instance) + vm = yield VMHelper.lookup(self._session, instance_name) if vm is None: - raise Exception('instance not present %s' % instance.name) + raise Exception('instance not present %s' % instance_name) task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm) yield self._session.wait_for_task(task) @defer.inlineCallbacks def destroy(self, instance): - vm = yield VMHelper.lookup(self._session, instance.name) + vm = yield VMHelper.lookup(self._session, Instance.get_name(instance)) if vm is None: # Don't complain, just return. This lets us clean up instances # that have already disappeared from the underlying platform. diff --git a/nova/virt/xenapi/power_state.py b/nova/virt/xenapi/volumeops.py similarity index 64% rename from nova/virt/xenapi/power_state.py rename to nova/virt/xenapi/volumeops.py index 5892f0f485c7..f5b43adfb305 100644 --- a/nova/virt/xenapi/power_state.py +++ b/nova/virt/xenapi/volumeops.py @@ -14,12 +14,22 @@ # License for the specific language governing permissions and limitations # under the License. +""" +Management class for Storage-related functions (attach, detach, etc). +""" + +from twisted.internet import defer + +from nova import exception from nova.compute import power_state -XENAPI_POWER_STATE = { - 'Halted': power_state.SHUTDOWN, - 'Running': power_state.RUNNING, - 'Paused': power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed': power_state.CRASHED} +class VMOps(object): + def __init__(self, session): + self._session = session + + def attach_volume(self, instance_name, device_path, mountpoint): + return True + + def detach_volume(self, instance_name, mountpoint): + return True \ No newline at end of file From 9d26ad69bfeb88106a08f0f3f1e15ed621c18af2 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 29 Nov 2010 10:25:52 +0000 Subject: [PATCH 26/83] first cut of the refactoring of the XenAPIConnection class. Currently the class merged both the code for managing the XenAPI connection and the business logic for implementing Nova operations. If left like this, it would eventually become difficult to read, maintain and extend. 
The file was getting kind of big and cluttered, so a quick refactoring now will save a lot of headaches later --- nova/virt/xenapi/network_utils.py | 10 ---------- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/vmops.py | 1 - nova/virt/xenapi/volumeops.py | 5 ----- 4 files changed, 1 insertion(+), 17 deletions(-) diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index e062f916f037..83ade13892d6 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -25,16 +25,6 @@ import xmlrpclib from twisted.internet import defer from twisted.internet import reactor from twisted.internet import task - -from nova import db -from nova import flags -from nova import process -from nova import utils -from nova.auth.manager import AuthManager # wrap this one -from nova.compute import instance_types # wrap this one -from nova.virt import images # wrap this one - -import power_state class NetworkHelper(): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 8329f0d7e25f..a1b444e4179b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -152,7 +152,7 @@ class VMHelper(): @classmethod @utils.deferredToThread def lookup(self, session, i): - return VMHelper.lookup_blocking(i) + return VMHelper.lookup_blocking(session, i) @classmethod def lookup_blocking(self, session, i): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index abb42250202a..c04a9f4ec42e 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -25,7 +25,6 @@ from twisted.internet import defer from twisted.internet import reactor from twisted.internet import task - import VMHelper import NetworkHelper diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index f5b43adfb305..fd316a0b8f0e 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -18,11 +18,6 @@ Management class for Storage-related functions (attach, detach, etc). """ -from twisted.internet import defer - -from nova import exception -from nova.compute import power_state - class VMOps(object): def __init__(self, session): From 9e34c9c7dc88d9e361c7f2d05e06b53ff68ee53f Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 29 Nov 2010 12:52:03 +0000 Subject: [PATCH 27/83] fixed deps --- nova/virt/xapi/__init__.py | 15 +++++++++ nova/virt/{xenapi => xapi}/network_utils.py | 16 ++++----- nova/virt/{xenapi => xapi}/novadeps.py | 31 ++++++++++-------- nova/virt/{xenapi => xapi}/vm_utils.py | 36 ++++++++++----------- nova/virt/{xenapi => xapi}/vmops.py | 35 ++++++++++---------- nova/virt/{xenapi => xapi}/volumeops.py | 4 +-- nova/virt/xenapi.py | 11 ++++--- 7 files changed, 83 insertions(+), 65 deletions(-) create mode 100644 nova/virt/xapi/__init__.py rename nova/virt/{xenapi => xapi}/network_utils.py (90%) rename nova/virt/{xenapi => xapi}/novadeps.py (93%) rename nova/virt/{xenapi => xapi}/vm_utils.py (92%) rename nova/virt/{xenapi => xapi}/vmops.py (87%) rename nova/virt/{xenapi => xapi}/volumeops.py (95%) diff --git a/nova/virt/xapi/__init__.py b/nova/virt/xapi/__init__.py new file mode 100644 index 000000000000..3d598c463cc0 --- /dev/null +++ b/nova/virt/xapi/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xapi/network_utils.py similarity index 90% rename from nova/virt/xenapi/network_utils.py rename to nova/virt/xapi/network_utils.py index 83ade13892d6..b58b9159c201 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xapi/network_utils.py @@ -15,19 +15,17 @@ # under the License. """ -Helper methods for operations related to the management of network records and +Helper methods for operations related to the management of network records and their attributes like bridges, PIFs, QoS, as well as their lookup functions. """ -import logging -import xmlrpclib - from twisted.internet import defer -from twisted.internet import reactor -from twisted.internet import task - - + + class NetworkHelper(): + def __init__(self, session): + return + @classmethod @defer.inlineCallbacks def find_network_with_bridge(self, session, bridge): @@ -39,4 +37,4 @@ class NetworkHelper(): elif len(networks) > 1: raise Exception('Found non-unique network for bridge %s' % bridge) else: - raise Exception('Found no network for bridge %s' % bridge) \ No newline at end of file + raise Exception('Found no network for bridge %s' % bridge) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xapi/novadeps.py similarity index 93% rename from nova/virt/xenapi/novadeps.py rename to nova/virt/xapi/novadeps.py index a4e51226308c..8cb5e3246f5b 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xapi/novadeps.py @@ -22,7 +22,7 @@ from nova import utils from nova.compute import power_state from nova.auth.manager import AuthManager from nova.compute import instance_types -from nova.virt import images +from nova.virt import images XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -30,33 +30,34 @@ XENAPI_POWER_STATE = { 'Paused': power_state.PAUSED, 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} - + + class Instance(object): @classmethod def get_name(self, instance): return instance.name - + @classmethod def get_type(self, instance): return instance_types.INSTANCE_TYPES[instance.instance_type] - + @classmethod def get_project(self, instance): return AuthManager().get_project(instance.project_id) - + @classmethod def get_project_id(self, instance): return instance.project_id - + @classmethod def get_image_id(self, instance): return instance.image_id - + @classmethod def get_kernel_id(self, instance): return instance.kernel_id - + @classmethod def get_ramdisk_id(self, instance): return instance.ramdisk_id @@ -64,28 +65,30 @@ class Instance(object): @classmethod def get_network(self, instance): return db.project_get_network(None, instance.project_id) - + @classmethod def get_mac(self, instance): return instance.mac_address - + @classmethod def get_user(self, instance): return AuthManager().get_user(instance.user_id) - + class Network(object): @classmethod def get_bridge(self, network): return network.bridge - + + class Image(object): @classmethod def get_url(self, image): return images.image_url(image) - + + class User(object): @classmethod @@ -94,4 +97,4 @@ class User(object): @classmethod def get_secret(self, user): - 
return user.secret \ No newline at end of file + return user.secret diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xapi/vm_utils.py similarity index 92% rename from nova/virt/xenapi/vm_utils.py rename to nova/virt/xapi/vm_utils.py index a1b444e4179b..41f687ccbb13 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xapi/vm_utils.py @@ -15,35 +15,32 @@ # under the License. """ -Helper methods for operations related to the management of VM records and +Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ import logging -import xmlrpclib from twisted.internet import defer -from twisted.internet import reactor -from twisted.internet import task -from nova import db -from nova import flags -from nova import process from nova import utils from novadeps import Instance from novadeps import Image from novadeps import User - - + + class VMHelper(): + def __init__(self, session): + return + @classmethod @defer.inlineCallbacks def create_vm(self, session, instance, kernel, ramdisk): """Create a VM record. Returns a Deferred that gives the new VM reference.""" - instance_type = Instance.get_type(instance) + instance_type = Instance.get_type(instance) mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) rec = { @@ -77,10 +74,11 @@ class VMHelper(): } logging.debug('Created VM %s...', Instance.get_name(instance)) vm_ref = yield session.call_xenapi('VM.create', rec) - logging.debug('Created VM %s as %s.', Instance.get_name(instance), vm_ref) + logging.debug('Created VM %s as %s.', + Instance.get_name(instance), vm_ref) defer.returnValue(vm_ref) - - @classmethod + + @classmethod @defer.inlineCallbacks def create_vbd(self, session, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new @@ -105,7 +103,7 @@ class VMHelper(): vdi_ref) defer.returnValue(vbd_ref) - @classmethod + @classmethod @defer.inlineCallbacks def create_vif(self, session, vm_ref, network_ref, mac_address): """Create a VIF record. Returns a Deferred that gives the new @@ -147,9 +145,9 @@ class VMHelper(): args['add_partition'] = 'true' task = yield session.async_call_plugin('objectstore', fn, args) uuid = yield session.wait_for_task(task) - defer.returnValue(uuid) - - @classmethod + defer.returnValue(uuid) + + @classmethod @utils.deferredToThread def lookup(self, session, i): return VMHelper.lookup_blocking(session, i) @@ -165,7 +163,7 @@ class VMHelper(): else: return vms[0] - @classmethod + @classmethod @utils.deferredToThread def lookup_vm_vdis(self, session, vm): return VMHelper.lookup_vm_vdis_blocking(session, vm) @@ -189,4 +187,4 @@ class VMHelper(): if len(vdis) > 0: return vdis else: - return None \ No newline at end of file + return None diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xapi/vmops.py similarity index 87% rename from nova/virt/xenapi/vmops.py rename to nova/virt/xapi/vmops.py index c04a9f4ec42e..d6ea5e7db707 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xapi/vmops.py @@ -19,24 +19,21 @@ Management class for VM-related functions (spawn, reboot, etc). 
""" import logging -import xmlrpclib from twisted.internet import defer -from twisted.internet import reactor -from twisted.internet import task - -import VMHelper -import NetworkHelper from novadeps import XENAPI_POWER_STATE -from novadeps import Auth from novadeps import Instance from novadeps import Network +from vm_utils import VMHelper +from network_utils import NetworkHelper + + class VMOps(object): def __init__(self, session): self._session = session - + def list_instances(self): return [self._session.get_xenapi().VM.get_name_label(vm) \ for vm in self._session.get_xenapi().VM.get_all()] @@ -48,9 +45,9 @@ class VMOps(object): raise Exception('Attempted to create non-unique name %s' % Instance.get_name(instance)) - network = Instance.get_network(instance) + bridge = Network.get_bridge(Instance.get_network(instance)) network_ref = \ - yield NetworkHelper.find_network_with_bridge(self._session, Network.get_bridge(network)) + yield NetworkHelper.find_network_with_bridge(self._session, bridge) user = Instance.get_user(instance) project = Instance.get_project(instance) @@ -61,14 +58,16 @@ class VMOps(object): ramdisk = yield VMHelper.fetch_image(self._session, Instance.get_ramdisk_id(instance), user, project, False) vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - - vm_ref = yield VMHelper.create_vm(self._session, instance, kernel, ramdisk) + vm_ref = yield VMHelper.create_vm(self._session, + instance, kernel, ramdisk) yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) if network_ref: - yield VMHelper.create_vif(self._session, vm_ref, network_ref, Instance.get_mac(instance)) + yield VMHelper.create_vif(self._session, vm_ref, + network_ref, Instance.get_mac(instance)) logging.debug('Starting VM %s...', vm_ref) yield self._session.call_xenapi('VM.start', vm_ref, False, False) - logging.info('Spawning VM %s created %s.', Instance.get_name(instance), vm_ref) + logging.info('Spawning VM %s created %s.', Instance.get_name(instance), + vm_ref) @defer.inlineCallbacks def reboot(self, instance): @@ -89,7 +88,8 @@ class VMOps(object): # Get the VDIs related to the VM vdis = yield VMHelper.lookup_vm_vdis(self._session, vm) try: - task = yield self._session.call_xenapi('Async.VM.hard_shutdown', vm) + task = yield self._session.call_xenapi('Async.VM.hard_shutdown', + vm) yield self._session.wait_for_task(task) except Exception, exc: logging.warn(exc) @@ -97,7 +97,8 @@ class VMOps(object): if vdis: for vdi in vdis: try: - task = yield self._session.call_xenapi('Async.VDI.destroy', vdi) + task = yield self._session.call_xenapi('Async.VDI.destroy', + vdi) yield self._session.wait_for_task(task) except Exception, exc: logging.warn(exc) @@ -119,4 +120,4 @@ class VMOps(object): 'cpu_time': 0} def get_console_output(self, instance): - return 'FAKE CONSOLE OUTPUT' \ No newline at end of file + return 'FAKE CONSOLE OUTPUT' diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xapi/volumeops.py similarity index 95% rename from nova/virt/xenapi/volumeops.py rename to nova/virt/xapi/volumeops.py index fd316a0b8f0e..23f79adf7133 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xapi/volumeops.py @@ -19,7 +19,7 @@ Management class for Storage-related functions (attach, detach, etc). 
""" -class VMOps(object): +class VolumeOps(object): def __init__(self, session): self._session = session @@ -27,4 +27,4 @@ class VMOps(object): return True def detach_volume(self, instance_name, mountpoint): - return True \ No newline at end of file + return True diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 2f2cef75ef91..613f19f82318 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -52,10 +52,13 @@ import xmlrpclib from twisted.internet import defer from twisted.internet import reactor -from twisted.internet import task +#from twisted.internet import task -from xenapi import VMOps -from xenapi import VolumeOps +from nova import flags +from nova import utils + +from xapi.vmops import VMOps +from xapi.volumeops import VolumeOps XenAPI = None @@ -151,7 +154,7 @@ class XenAPISession(object): for m in method.split('.'): f = f.__getattr__(m) return f(*args) - + @utils.deferredToThread def async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread. Returns a From a7fe9d8cbc4bb6b2ca4306c0adff46edcb2fce6f Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 29 Nov 2010 14:02:03 +0100 Subject: [PATCH 28/83] Add include_package_data=True to setup.py. This makes sure the various templates get installed into the python path when running "python setup.py install". --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index ec0014478b6c..d88bc1e6f8f6 100644 --- a/setup.py +++ b/setup.py @@ -57,6 +57,7 @@ setup(name='nova', cmdclass={ 'sdist': local_sdist, 'build_sphinx' : local_BuildDoc }, packages=find_packages(exclude=['bin', 'smoketests']), + include_package_data=True, scripts=['bin/nova-api', 'bin/nova-compute', 'bin/nova-dhcpbridge', From c8f6db354f5e8f55b432854d5259dcf84f0c8ba0 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 29 Nov 2010 15:49:12 +0100 Subject: [PATCH 29/83] Make sure templated flags work across calls to ParseNewFlags. ParseNewFlags creates a new FlagValues object, which doesn't have all the previously defined flags, so template lookups fail miserably. Pass the existing FlagValues object too the template mapping object to fix this. 
--- nova/flags.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 70a049491080..641bda5f98ad 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -40,11 +40,12 @@ class FlagValues(gflags.FlagValues): """ - def __init__(self): + def __init__(self, extra_context=None): gflags.FlagValues.__init__(self) self.__dict__['__dirty'] = [] self.__dict__['__was_already_parsed'] = False self.__dict__['__stored_argv'] = [] + self.__dict__['__extra_context'] = extra_context def __call__(self, argv): # We're doing some hacky stuff here so that we don't have to copy @@ -114,7 +115,7 @@ class FlagValues(gflags.FlagValues): def ParseNewFlags(self): if '__stored_argv' not in self.__dict__: return - new_flags = FlagValues() + new_flags = FlagValues(self) for k in self.__dict__['__dirty']: new_flags[k] = gflags.FlagValues.__getitem__(self, k) @@ -139,18 +140,25 @@ class FlagValues(gflags.FlagValues): val = gflags.FlagValues.__getattr__(self, name) if type(val) is str: tmpl = Template(val) - return tmpl.substitute(StrWrapper(self)) + context = [self, self.__dict__['__extra_context']] + return tmpl.substitute(StrWrapper(context)) return val + class StrWrapper(object): - def __init__(self, obj): - self.wrapped = obj + """Wrapper around FlagValues objects + + Wraps FlagValues objects for string.Template so that we're + sure to return strings.""" + def __init__(self, context_objs): + self.context_objs = context_objs def __getitem__(self, name): - if hasattr(self.wrapped, name): - return str(getattr(self.wrapped, name)) - else: - raise KeyError(name) + for context in self.context_objs: + val = getattr(context, name, False) + if val: + return str(val) + raise KeyError(name) FLAGS = FlagValues() gflags.FLAGS = FLAGS From a82581cbada92d0e274438757f7beb3ed335da1b Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 29 Nov 2010 16:31:31 +0000 Subject: [PATCH 30/83] pep8 fixes and further round of refactoring --- nova/virt/connection.py | 4 ++-- nova/virt/{xapi => xenapi}/__init__.py | 0 nova/virt/{xapi => xenapi}/network_utils.py | 0 nova/virt/{xapi => xenapi}/novadeps.py | 5 ++++- nova/virt/{xapi => xenapi}/vm_utils.py | 2 +- nova/virt/{xapi => xenapi}/vmops.py | 0 nova/virt/{xapi => xenapi}/volumeops.py | 0 nova/virt/{xenapi.py => xenapi_conn.py} | 5 ++--- 8 files changed, 9 insertions(+), 7 deletions(-) rename nova/virt/{xapi => xenapi}/__init__.py (100%) rename nova/virt/{xapi => xenapi}/network_utils.py (100%) rename nova/virt/{xapi => xenapi}/novadeps.py (92%) rename nova/virt/{xapi => xenapi}/vm_utils.py (99%) rename nova/virt/{xapi => xenapi}/vmops.py (100%) rename nova/virt/{xapi => xenapi}/volumeops.py (100%) rename nova/virt/{xenapi.py => xenapi_conn.py} (98%) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 11f0fa8ced65..c40bb4bb4d79 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -25,7 +25,7 @@ import sys from nova import flags from nova.virt import fake from nova.virt import libvirt_conn -from nova.virt import xenapi +from nova.virt import xenapi_conn FLAGS = flags.FLAGS @@ -61,7 +61,7 @@ def get_connection(read_only=False): elif t == 'libvirt': conn = libvirt_conn.get_connection(read_only) elif t == 'xenapi': - conn = xenapi.get_connection(read_only) + conn = xenapi_conn.get_connection(read_only) else: raise Exception('Unknown connection type "%s"' % t) diff --git a/nova/virt/xapi/__init__.py b/nova/virt/xenapi/__init__.py similarity index 100% rename from 
nova/virt/xapi/__init__.py rename to nova/virt/xenapi/__init__.py diff --git a/nova/virt/xapi/network_utils.py b/nova/virt/xenapi/network_utils.py similarity index 100% rename from nova/virt/xapi/network_utils.py rename to nova/virt/xenapi/network_utils.py diff --git a/nova/virt/xapi/novadeps.py b/nova/virt/xenapi/novadeps.py similarity index 92% rename from nova/virt/xapi/novadeps.py rename to nova/virt/xenapi/novadeps.py index 8cb5e3246f5b..ba62468fbccc 100644 --- a/nova/virt/xapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -18,6 +18,7 @@ from nova import db from nova import flags from nova import process from nova import utils +from nova import context from nova.compute import power_state from nova.auth.manager import AuthManager @@ -64,7 +65,9 @@ class Instance(object): @classmethod def get_network(self, instance): - return db.project_get_network(None, instance.project_id) + # TODO: is ge_admin_context the right context to retrieve? + return db.project_get_network(context.get_admin_context(), + instance.project_id) @classmethod def get_mac(self, instance): diff --git a/nova/virt/xapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py similarity index 99% rename from nova/virt/xapi/vm_utils.py rename to nova/virt/xenapi/vm_utils.py index 41f687ccbb13..b68df2791909 100644 --- a/nova/virt/xapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -33,7 +33,7 @@ from novadeps import User class VMHelper(): def __init__(self, session): return - + @classmethod @defer.inlineCallbacks def create_vm(self, session, instance, kernel, ramdisk): diff --git a/nova/virt/xapi/vmops.py b/nova/virt/xenapi/vmops.py similarity index 100% rename from nova/virt/xapi/vmops.py rename to nova/virt/xenapi/vmops.py diff --git a/nova/virt/xapi/volumeops.py b/nova/virt/xenapi/volumeops.py similarity index 100% rename from nova/virt/xapi/volumeops.py rename to nova/virt/xenapi/volumeops.py diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi_conn.py similarity index 98% rename from nova/virt/xenapi.py rename to nova/virt/xenapi_conn.py index 613f19f82318..0a73b47741f0 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi_conn.py @@ -52,13 +52,12 @@ import xmlrpclib from twisted.internet import defer from twisted.internet import reactor -#from twisted.internet import task from nova import flags from nova import utils -from xapi.vmops import VMOps -from xapi.volumeops import VolumeOps +from xenapi.vmops import VMOps +from xenapi.volumeops import VolumeOps XenAPI = None From 28927f0c9688dd7f3c84a1eda4cc646a1aff7896 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 29 Nov 2010 21:05:40 +0100 Subject: [PATCH 31/83] Import string instead of importing Template from string. This is how we do things. --- nova/flags.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 641bda5f98ad..9487294499e2 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -24,10 +24,9 @@ where they're used. 
import getopt import os import socket +import string import sys -from string import Template - import gflags @@ -139,7 +138,7 @@ class FlagValues(gflags.FlagValues): self.ParseNewFlags() val = gflags.FlagValues.__getattr__(self, name) if type(val) is str: - tmpl = Template(val) + tmpl = string.Template(val) context = [self, self.__dict__['__extra_context']] return tmpl.substitute(StrWrapper(context)) return val From 03deb0dde48a0b9c7c6c52689ecf8a70e1fa7b7e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 29 Nov 2010 22:01:19 +0100 Subject: [PATCH 32/83] Adjust state_path default setting so that api unit tests find things where they used to find them. --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 9487294499e2..cb9fa105b484 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -244,7 +244,7 @@ DEFINE_string('vpn_key_suffix', DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') -DEFINE_string('state_path', os.path.abspath("./"), +DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), "Top-level directory for maintaining nova's state") DEFINE_string('sql_connection', From 2cc492240fab447a62e7ca3ea1c16744baad9256 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 29 Nov 2010 21:23:21 +0000 Subject: [PATCH 33/83] update of nova.sh because default flagfile moved --- contrib/nova.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/contrib/nova.sh b/contrib/nova.sh index 6033c9e3a301..7407422b3d34 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -23,7 +23,7 @@ MYSQL_PASS=${MYSQL_PASS:-nova} TEST=${TEST:-0} USE_LDAP=${USE_LDAP:-0} LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu} -NET_MAN=${NET_MAN:-FlatDHCPManager} +NET_MAN=${NET_MAN:-VlanManager} # NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface # below but make sure that the interface doesn't already have an # ip or you risk breaking things. @@ -42,10 +42,10 @@ else fi mkdir -p /etc/nova -cat >/etc/nova/nova-manage.conf << NOVA_CONF_EOF +cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF --verbose --nodaemon ---dhcpbridge_flagfile=/etc/nova/nova-manage.conf +--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf --FAKE_subdomain=ec2 --network_manager=nova.network.manager.$NET_MAN --cc_host=$HOST_IP @@ -56,7 +56,7 @@ cat >/etc/nova/nova-manage.conf << NOVA_CONF_EOF NOVA_CONF_EOF if [ -n "$FLAT_INTERFACE" ]; then - echo "--flat_interface=$FLAT_INTERFACE" >>/etc/nova/nova-manage.conf + echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf fi if [ "$CMD" == "branch" ]; then @@ -142,12 +142,12 @@ if [ "$CMD" == "run" ]; then # nova api crashes if we start it with a regular screen command, # so send the start command by forcing text into the window. 
- screen_it api "$NOVA_DIR/bin/nova-api --flagfile=/etc/nova/nova-manage.conf" - screen_it objectstore "$NOVA_DIR/bin/nova-objectstore --flagfile=/etc/nova/nova-manage.conf" - screen_it compute "$NOVA_DIR/bin/nova-compute --flagfile=/etc/nova/nova-manage.conf" - screen_it network "$NOVA_DIR/bin/nova-network --flagfile=/etc/nova/nova-manage.conf" - screen_it scheduler "$NOVA_DIR/bin/nova-scheduler --flagfile=/etc/nova/nova-manage.conf" - screen_it volume "$NOVA_DIR/bin/nova-volume --flagfile=/etc/nova/nova-manage.conf" + screen_it api "$NOVA_DIR/bin/nova-api" + screen_it objectstore "$NOVA_DIR/bin/nova-objectstore" + screen_it compute "$NOVA_DIR/bin/nova-compute" + screen_it network "$NOVA_DIR/bin/nova-network" + screen_it scheduler "$NOVA_DIR/bin/nova-scheduler" + screen_it volume "$NOVA_DIR/bin/nova-volume" screen_it test ". $NOVA_DIR/novarc" screen -S nova -x fi From e6dde30724ac47f6abeb5eaa56a68fb9ac166397 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 29 Nov 2010 23:04:54 +0100 Subject: [PATCH 34/83] Correctly handle imageId list passed to DescribeImages API call. --- nova/api/ec2/cloud.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9327bf0d44d5..9cabd2e7d3a0 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -994,7 +994,10 @@ class CloudController(object): return True def describe_images(self, context, image_id=None, **kwargs): - imageSet = self.image_service.index(context, image_id) + # Note: image_id is a list! + imageSet = self.image_service.index(context) + if image_id: + imageSet = filter(lambda x: x['imageId'] in image_id, imageSet) return {'imagesSet': imageSet} def deregister_image(self, context, image_id, **kwargs): From 8ee658e7f6da2484377bec7652f37df7259f9e8a Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 29 Nov 2010 17:26:05 -0600 Subject: [PATCH 35/83] Return the correct server_management_url --- nova/api/openstack/auth.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index ff428ff701a9..f91742b37835 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -47,7 +47,7 @@ class BasicApiAuthManager(object): except KeyError: return faults.Fault(webob.exc.HTTPUnauthorized()) - token, user = self._authorize_user(username, key) + token, user = self._authorize_user(username, key, req) if user and token: res = webob.Response() res.headers['X-Auth-Token'] = token.token_hash @@ -82,8 +82,13 @@ class BasicApiAuthManager(object): return {'id': user.id} return None - def _authorize_user(self, username, key): - """ Generates a new token and assigns it to a user """ + def _authorize_user(self, username, key, req): + """Generates a new token and assigns it to a user. + + username - string + key - string API key + req - webob.Request object + """ user = self.auth.get_user_from_access_key(key) if user and user.name == username: token_hash = hashlib.sha1('%s%s%f' % (username, key, @@ -91,12 +96,10 @@ class BasicApiAuthManager(object): token_dict = {} token_dict['token_hash'] = token_hash token_dict['cdn_management_url'] = '' - token_dict['server_management_url'] = self._get_server_mgmt_url() + # Same as auth url, e.g. 
http://foo.org:8774/baz/v1.0 + token_dict['server_management_url'] = req.url token_dict['storage_url'] = '' token_dict['user_id'] = user.id token = self.db.auth_create_token(self.context, token_dict) return token, user return None, None - - def _get_server_mgmt_url(self): - return 'https://%s/v1.0/' % self.host From e82afc902020d7bf7bc60141a629287599d8796a Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 29 Nov 2010 17:38:51 -0600 Subject: [PATCH 36/83] remove FAKE_subdomain reference --- contrib/nova.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/contrib/nova.sh b/contrib/nova.sh index 6033c9e3a301..7f111d2b342a 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -46,7 +46,6 @@ cat >/etc/nova/nova-manage.conf << NOVA_CONF_EOF --verbose --nodaemon --dhcpbridge_flagfile=/etc/nova/nova-manage.conf ---FAKE_subdomain=ec2 --network_manager=nova.network.manager.$NET_MAN --cc_host=$HOST_IP --routing_source_ip=$HOST_IP From 6d097a220846c54cb11b4a0e480f282e50db6058 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 30 Nov 2010 09:19:32 +0100 Subject: [PATCH 37/83] Rename imageSet variable to images. --- nova/api/ec2/cloud.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9cabd2e7d3a0..884372ce7b24 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -995,10 +995,10 @@ class CloudController(object): def describe_images(self, context, image_id=None, **kwargs): # Note: image_id is a list! - imageSet = self.image_service.index(context) + images = self.image_service.index(context) if image_id: - imageSet = filter(lambda x: x['imageId'] in image_id, imageSet) - return {'imagesSet': imageSet} + images = filter(lambda x: x['imageId'] in image_id, images) + return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): self.image_service.deregister(context, image_id) From 41b3faf113d7591e61b03678dc13cd9ef031efbb Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 30 Nov 2010 10:40:17 -0500 Subject: [PATCH 38/83] If only I weren't so lazy. 
--- nova/tests/api/openstack/test_auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 29f4b8874772..4b75995dceb4 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -69,7 +69,7 @@ class Test(unittest.TestCase): self.assertEqual(result.status, '204 No Content') self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(result.headers['X-Server-Management-Url'], - "https://foo/v1.0/") + "http://foo/v1.0/") self.assertEqual(result.headers['X-CDN-Management-Url'], "") self.assertEqual(result.headers['X-Storage-Url'], "") From 84fdd48fe2db20661f076884810f0c726630452f Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 30 Nov 2010 13:52:46 -0500 Subject: [PATCH 39/83] Fix unit tests --- nova/api/openstack/auth.py | 5 +---- nova/tests/api/openstack/fakes.py | 1 - nova/tests/api/openstack/test_auth.py | 2 +- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index f91742b37835..20503591542f 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -23,10 +23,7 @@ class Context(object): class BasicApiAuthManager(object): """ Implements a somewhat rudimentary version of OpenStack Auth""" - def __init__(self, host=None, db_driver=None): - if not host: - host = FLAGS.host - self.host = host + def __init__(self, db_driver=None): if not db_driver: db_driver = FLAGS.db_driver self.db = utils.import_object(db_driver) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 639a2ebe4e4e..6e91ca7bbf8b 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -54,7 +54,6 @@ def fake_auth_init(self): self.db = FakeAuthDatabase() self.context = Context() self.auth = FakeAuthManager() - self.host = 'foo' @webob.dec.wsgify diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 4b75995dceb4..14e720be41d8 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -62,7 +62,7 @@ class Test(unittest.TestCase): f = fakes.FakeAuthManager() f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) - req = webob.Request.blank('/v1.0/') + req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'herp' req.headers['X-Auth-Key'] = 'derp' result = req.get_response(nova.api.API('os')) From aaee43a74264d5e6a4ccf638f882b19d477c3c9f Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Tue, 30 Nov 2010 23:12:19 +0000 Subject: [PATCH 40/83] Added a script to use OpenDJ as an LDAP server instead of OpenLDAP. Also modified nova.sh to add an USE_OPENDJ option, that will be checked when USE_LDAP is set. 
--- contrib/nova.sh | 10 +++- nova/auth/opendj.sh | 119 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+), 1 deletion(-) create mode 100755 nova/auth/opendj.sh diff --git a/contrib/nova.sh b/contrib/nova.sh index 1a9f93a3b18a..7eb934eca94d 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -22,6 +22,8 @@ USE_MYSQL=${USE_MYSQL:-0} MYSQL_PASS=${MYSQL_PASS:-nova} TEST=${TEST:-0} USE_LDAP=${USE_LDAP:-0} +# Use OpenDJ instead of OpenLDAP when using LDAP +USE_OPENDJ=${USE_OPENDJ:-0} LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu} NET_MAN=${NET_MAN:-VlanManager} # NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface @@ -113,7 +115,13 @@ if [ "$CMD" == "run" ]; then rm $NOVA_DIR/nova.sqlite fi if [ "$USE_LDAP" == 1 ]; then - sudo $NOVA_DIR/nova/auth/slap.sh + if [ "$USE_OPENDJ" == 1 ]; then + echo '--ldap_user_dn=cn=Directory Manager' >> \ + /etc/nova/nova-manage.conf + sudo $NOVA_DIR/nova/auth/opendj.sh + else + sudo $NOVA_DIR/nova/auth/slap.sh + fi fi rm -rf $NOVA_DIR/instances mkdir -p $NOVA_DIR/instances diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh new file mode 100755 index 000000000000..8052c077d0dc --- /dev/null +++ b/nova/auth/opendj.sh @@ -0,0 +1,119 @@ +#!/usr/bin/env bash +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# LDAP INSTALL SCRIPT - IS IDEMPOTENT, does not scrub users + +apt-get install -y ldap-utils python-ldap openjdk-6-jre + +if [ ! -d "/usr/opendj" ] +then + # TODO(rlane): Wikimedia Foundation is the current package maintainer. + # After the package is included in Ubuntu's channel, change this. + wget http://apt.wikimedia.org/wikimedia/pool/main/o/opendj/opendj_2.4.0-7_amd64.deb + dpkg -i opendj_2.4.0-7_amd64.deb +fi + +abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` +schemapath='/var/opendj/instance/config/schema' +cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif +cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif +chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif +chown opendj:opendj $schemapath/98-nova_sun.ldif + +cat >/etc/ldap/ldap.conf </etc/ldap/base.ldif < Date: Wed, 1 Dec 2010 11:50:25 +0100 Subject: [PATCH 41/83] Move cc_host and cc_port flags into nova/network/linux_net.py. They weren't used anywhere else. Make cc_host default to nova.utils.get_my_ip() instead of 127.0.0.1. cc_host is used to set up forwarding to the meta-data service, and the kernel doesn't allow routing to a loopback device, so 127.0.0.1 is a poor default. 
--- nova/flags.py | 2 -- nova/network/linux_net.py | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index cb9fa105b484..1f94feb08b7c 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -223,8 +223,6 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('cc_host', '127.0.0.1', 'ip of api server') -DEFINE_integer('cc_port', 8773, 'cloud controller port') DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', 'Url to ec2 api server') diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 7b00e65d4290..0fefd9415698 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -46,6 +46,8 @@ flags.DEFINE_string('vlan_interface', 'eth0', 'network device for vlans') flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'), 'location of nova-dhcpbridge') +flags.DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') +flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') flags.DEFINE_string('routing_source_ip', '127.0.0.1', 'Public IP of network host') flags.DEFINE_bool('use_nova_chains', False, From 11dddd7ca4f4264ef3a8f1e251601c1d8fd7a626 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 1 Dec 2010 10:44:51 -0600 Subject: [PATCH 42/83] Move default_flagfile() call to where it will be parsed in time to load the flagfile --- bin/nova-compute | 3 ++- bin/nova-instancemonitor | 2 +- bin/nova-network | 2 +- bin/nova-objectstore | 2 +- bin/nova-scheduler | 2 +- bin/nova-volume | 2 +- 6 files changed, 7 insertions(+), 6 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index a66477af536f..1054852c4ce7 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -38,8 +38,9 @@ from nova import utils if __name__ == '__main__': + utils.default_flagfile() twistd.serve(__file__) if __name__ == '__builtin__': - utils.default_flagfile() application = service.Service.create() # pylint: disable=C0103 + diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor index a7b7fb0c68e7..9b6c40e8207c 100755 --- a/bin/nova-instancemonitor +++ b/bin/nova-instancemonitor @@ -42,10 +42,10 @@ logging.getLogger('boto').setLevel(logging.WARN) if __name__ == '__main__': + utils.default_flagfile() twistd.serve(__file__) if __name__ == '__builtin__': - utils.default_flagfile() logging.warn('Starting instance monitor') # pylint: disable-msg=C0103 monitor = monitor.InstanceMonitor() diff --git a/bin/nova-network b/bin/nova-network index 342a63058cc3..d1fb552612a7 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -38,8 +38,8 @@ from nova import utils if __name__ == '__main__': + utils.default_flagfile() twistd.serve(__file__) if __name__ == '__builtin__': - utils.default_flagfile() application = service.Service.create() # pylint: disable-msg=C0103 diff --git a/bin/nova-objectstore b/bin/nova-objectstore index 728f2ee5bcfc..00ae27af93a1 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -42,8 +42,8 @@ FLAGS = flags.FLAGS if __name__ == '__main__': + utils.default_flagfile() twistd.serve(__file__) if __name__ == '__builtin__': - utils.default_flagfile() application = handler.get_application() # pylint: disable-msg=C0103 diff --git a/bin/nova-scheduler b/bin/nova-scheduler index 069b5a6fa950..4d1a40cf10ef 100755 --- 
a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -38,8 +38,8 @@ from nova import utils if __name__ == '__main__': + utils.default_flagfile() twistd.serve(__file__) if __name__ == '__builtin__': - utils.default_flagfile() application = service.Service.create() diff --git a/bin/nova-volume b/bin/nova-volume index 26148b0ecc2b..e7281d6c0b92 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -38,8 +38,8 @@ from nova import utils if __name__ == '__main__': + utils.default_flagfile() twistd.serve(__file__) if __name__ == '__builtin__': - utils.default_flagfile() application = service.Service.create() # pylint: disable-msg=C0103 From 6956057ac490c788cb94fbfd0af7fe6e91a7ca96 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 1 Dec 2010 09:24:39 -0800 Subject: [PATCH 43/83] Broke parts of compute manager out into compute.api to separate what gets run on the API side vs the worker side. --- nova/api/ec2/cloud.py | 15 +-- nova/api/openstack/servers.py | 5 +- nova/compute/api.py | 207 +++++++++++++++++++++++++++++++++ nova/compute/manager.py | 169 --------------------------- nova/db/base.py | 36 ++++++ nova/manager.py | 10 +- nova/tests/compute_unittest.py | 8 +- 7 files changed, 262 insertions(+), 188 deletions(-) create mode 100644 nova/compute/api.py create mode 100644 nova/db/base.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index c694579674fb..6c09175007f2 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -39,6 +39,7 @@ from nova import flags from nova import quota from nova import rpc from nova import utils +from nova.compute import api as compute_api from nova.compute import instance_types from nova.api import cloud from nova.image.s3 import S3ImageService @@ -94,7 +95,7 @@ class CloudController(object): """ def __init__(self): self.network_manager = utils.import_object(FLAGS.network_manager) - self.compute_manager = utils.import_object(FLAGS.compute_manager) + self.compute_api = compute_api.ComputeAPI() self.image_service = S3ImageService() self.setup() @@ -255,7 +256,7 @@ class CloudController(object): return True def describe_security_groups(self, context, group_name=None, **kwargs): - self.compute_manager.ensure_default_security_group(context) + self.compute_api.ensure_default_security_group(context) if context.user.is_admin(): groups = db.security_group_get_all(context) else: @@ -353,7 +354,7 @@ class CloudController(object): return False def revoke_security_group_ingress(self, context, group_name, **kwargs): - self.compute_manager.ensure_default_security_group(context) + self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) @@ -378,7 +379,7 @@ class CloudController(object): # for these operations, so support for newer API versions # is sketchy. 
def authorize_security_group_ingress(self, context, group_name, **kwargs): - self.compute_manager.ensure_default_security_group(context) + self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) @@ -414,7 +415,7 @@ class CloudController(object): return source_project_id def create_security_group(self, context, group_name, group_description): - self.compute_manager.ensure_default_security_group(context) + self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): raise exception.ApiError('group %s already exists' % group_name) @@ -748,7 +749,7 @@ class CloudController(object): def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) - instances = self.compute_manager.create_instances(context, + instances = self.compute_api.create_instances(context, instance_types.get_by_type(kwargs.get('instance_type', None)), self.image_service, kwargs['image_id'], @@ -789,7 +790,7 @@ class CloudController(object): id_str) continue now = datetime.datetime.utcnow() - self.compute_manager.update_instance(context, + self.compute_api.update_instance(context, instance_ref['id'], state_description='terminating', state=0, diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index e1e2bf7fd441..8242c5b4436d 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -27,6 +27,7 @@ from nova import wsgi from nova import context from nova.api import cloud from nova.api.openstack import faults +from nova.compute import api as compute_api from nova.compute import instance_types from nova.compute import power_state import nova.api.openstack @@ -95,7 +96,7 @@ class Controller(wsgi.Controller): db_driver = FLAGS.db_driver self.db_driver = utils.import_object(db_driver) self.network_manager = utils.import_object(FLAGS.network_manager) - self.compute_manager = utils.import_object(FLAGS.compute_manager) + self.compute_api = compute_api.ComputeAPI() super(Controller, self).__init__() def index(self, req): @@ -147,7 +148,7 @@ class Controller(wsgi.Controller): user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) key_pair = self.db_driver.key_pair_get_all_by_user(None, user_id)[0] - instances = self.compute_manager.create_instances(ctxt, + instances = self.compute_api.create_instances(ctxt, instance_types.get_by_flavor_id(env['server']['flavorId']), utils.import_object(FLAGS.image_service), env['server']['imageId'], diff --git a/nova/compute/api.py b/nova/compute/api.py new file mode 100644 index 000000000000..e678be85d52a --- /dev/null +++ b/nova/compute/api.py @@ -0,0 +1,207 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Handles all API requests relating to instances (guest vms). +""" + +import logging +import time + +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import rpc +from nova import utils +from nova.compute import instance_types +from nova.db import base + +FLAGS = flags.FLAGS + + +def generate_default_hostname(internal_id): + """Default function to generate a hostname given an instance reference.""" + return str(internal_id) + + +class ComputeAPI(base.Base): + """API for interacting with the compute manager.""" + + def __init__(self, **kwargs): + self.network_manager = utils.import_object(FLAGS.network_manager) + super(ComputeAPI, self).__init__(**kwargs) + + # TODO(eday): network_topic arg should go away once we push network + # allocation into the scheduler or compute worker. + def create_instances(self, context, instance_type, image_service, image_id, + network_topic, min_count=1, max_count=1, + kernel_id=None, ramdisk_id=None, name='', + description='', user_data='', key_name=None, + key_data=None, security_group='default', + generate_hostname=generate_default_hostname): + """Create the number of instances requested if quote and + other arguments check out ok.""" + + num_instances = quota.allowed_instances(context, max_count, + instance_type) + if num_instances < min_count: + logging.warn("Quota exceeeded for %s, tried to run %s instances", + context.project_id, min_count) + raise quota.QuotaError("Instance quota exceeded. You can only " + "run %s more instances of this type." % + num_instances, "InstanceLimitExceeded") + + is_vpn = image_id == FLAGS.vpn_image_id + if not is_vpn: + image = image_service.show(context, image_id) + if kernel_id is None: + kernel_id = image.get('kernelId', FLAGS.default_kernel) + if ramdisk_id is None: + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # Make sure we have access to kernel and ramdisk + image_service.show(context, kernel_id) + image_service.show(context, ramdisk_id) + + if security_group is None: + security_group = ['default'] + if not type(security_group) is list: + security_group = [security_group] + + security_groups = [] + self.ensure_default_security_group(context) + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + + if key_data is None and key_name: + key_pair = db.key_pair_get(context, context.user_id, key_name) + key_data = key_pair['public_key'] + + type_data = instance_types.INSTANCE_TYPES[instance_type] + base_options = { + 'reservation_id': utils.generate_uid('r'), + 'server_name': name, + 'image_id': image_id, + 'kernel_id': kernel_id, + 'ramdisk_id': ramdisk_id, + 'state_description': 'scheduling', + 'user_id': context.user_id, + 'project_id': context.project_id, + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': instance_type, + 'memory_mb': type_data['memory_mb'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + 'display_name': name, + 'display_description': description, + 'key_name': key_name, + 'key_data': key_data} + + elevated = context.elevated() + instances = [] + logging.debug("Going to run %s instances...", num_instances) + for num in range(num_instances): + instance = dict(mac_address=utils.generate_mac(), + launch_index=num, + **base_options) + instance_ref = self.create_instance(context, security_groups, + **instance) + instance_id = instance_ref['id'] + 
internal_id = instance_ref['internal_id'] + hostname = generate_hostname(internal_id) + self.update_instance(context, instance_id, hostname=hostname) + instances.append(dict(id=instance_id, internal_id=internal_id, + hostname=hostname, **instance)) + + # TODO(vish): This probably should be done in the scheduler + # or in compute as a call. The network should be + # allocated after the host is assigned and setup + # can happen at the same time. + address = self.network_manager.allocate_fixed_ip(context, + instance_id, + is_vpn) + rpc.cast(elevated, + network_topic, + {"method": "setup_fixed_ip", + "args": {"address": address}}) + + logging.debug("Casting to scheduler for %s/%s's instance %s" % + (context.project_id, context.user_id, instance_id)) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "run_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id}}) + + return instances + + def ensure_default_security_group(self, context): + try: + db.security_group_get_by_name(context, context.project_id, + 'default') + except exception.NotFound: + values = {'name': 'default', + 'description': 'default', + 'user_id': context.user_id, + 'project_id': context.project_id} + group = db.security_group_create(context, values) + + def create_instance(self, context, security_groups=None, **kwargs): + """Creates the instance in the datastore and returns the + new instance as a mapping + + :param context: The security context + :param security_groups: list of security group ids to + attach to the instance + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + created + + :retval Returns a mapping of the instance information + that has just been created + + """ + instance_ref = self.db.instance_create(context, kwargs) + inst_id = instance_ref['id'] + + elevated = context.elevated() + if not security_groups: + security_groups = [] + for security_group_id in security_groups: + self.db.instance_add_security_group(elevated, + inst_id, + security_group_id) + return instance_ref + + def update_instance(self, context, instance_id, **kwargs): + """Updates the instance in the datastore. + + :param context: The security context + :param instance_id: ID of the instance to update + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + updated + + :retval None + + """ + self.db.instance_update(context, instance_id, kwargs) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3f870f866622..a25b8f6f3602 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -36,7 +36,6 @@ termination. 
import datetime import logging -import time from twisted.internet import defer @@ -44,13 +43,9 @@ from nova import db from nova import exception from nova import flags from nova import manager -from nova import quota -from nova import rpc from nova import utils -from nova.compute import instance_types from nova.compute import power_state - FLAGS = flags.FLAGS flags.DEFINE_string('instances_path', utils.abspath('../instances'), 'where instances are stored on disk') @@ -58,11 +53,6 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for volume creation') -def generate_default_hostname(internal_id): - """Default function to generate a hostname given an instance reference.""" - return str(internal_id) - - class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" @@ -94,165 +84,6 @@ class ComputeManager(manager.Manager): """This call passes stright through to the virtualization driver.""" yield self.driver.refresh_security_group(security_group_id) - # TODO(eday): network_topic arg should go away once we push network - # allocation into the scheduler or compute worker. - def create_instances(self, context, instance_type, image_service, image_id, - network_topic, min_count=1, max_count=1, - kernel_id=None, ramdisk_id=None, name='', - description='', user_data='', key_name=None, - key_data=None, security_group='default', - generate_hostname=generate_default_hostname): - """Create the number of instances requested if quote and - other arguments check out ok.""" - - num_instances = quota.allowed_instances(context, max_count, - instance_type) - if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% - num_instances, "InstanceLimitExceeded") - - is_vpn = image_id == FLAGS.vpn_image_id - if not is_vpn: - image = image_service.show(context, image_id) - if kernel_id is None: - kernel_id = image.get('kernelId', FLAGS.default_kernel) - if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) - - # Make sure we have access to kernel and ramdisk - image_service.show(context, kernel_id) - image_service.show(context, ramdisk_id) - - if security_group is None: - security_group = ['default'] - if not type(security_group) is list: - security_group = [security_group] - - security_groups = [] - self.ensure_default_security_group(context) - for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) - - if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) - key_data = key_pair['public_key'] - - type_data = instance_types.INSTANCE_TYPES[instance_type] - base_options = { - 'reservation_id': utils.generate_uid('r'), - 'server_name': name, - 'image_id': image_id, - 'kernel_id': kernel_id, - 'ramdisk_id': ramdisk_id, - 'state_description': 'scheduling', - 'user_id': context.user_id, - 'project_id': context.project_id, - 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), - 'instance_type': instance_type, - 'memory_mb': type_data['memory_mb'], - 'vcpus': type_data['vcpus'], - 'local_gb': type_data['local_gb'], - 'display_name': name, - 'display_description': description, - 'key_name': key_name, - 'key_data': key_data} - - elevated = context.elevated() - instances = [] - logging.debug("Going to run %s instances...", num_instances) - for num in range(num_instances): - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, - **base_options) - instance_ref = self.create_instance(context, security_groups, - **instance) - instance_id = instance_ref['id'] - internal_id = instance_ref['internal_id'] - hostname = generate_hostname(internal_id) - self.update_instance(context, instance_id, hostname=hostname) - instances.append(dict(id=instance_id, internal_id=internal_id, - hostname=hostname, **instance)) - - # TODO(vish): This probably should be done in the scheduler - # or in compute as a call. The network should be - # allocated after the host is assigned and setup - # can happen at the same time. 
- address = self.network_manager.allocate_fixed_ip(context, - instance_id, - is_vpn) - rpc.cast(elevated, - network_topic, - {"method": "setup_fixed_ip", - "args": {"address": address}}) - - logging.debug("Casting to scheduler for %s/%s's instance %s" % - (context.project_id, context.user_id, instance_id)) - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "run_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id}}) - - return instances - - def ensure_default_security_group(self, context): - try: - db.security_group_get_by_name(context, context.project_id, - 'default') - except exception.NotFound: - values = {'name': 'default', - 'description': 'default', - 'user_id': context.user_id, - 'project_id': context.project_id} - group = db.security_group_create(context, values) - - def create_instance(self, context, security_groups=None, **kwargs): - """Creates the instance in the datastore and returns the - new instance as a mapping - - :param context: The security context - :param security_groups: list of security group ids to - attach to the instance - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - created - - :retval Returns a mapping of the instance information - that has just been created - - """ - instance_ref = self.db.instance_create(context, kwargs) - inst_id = instance_ref['id'] - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - inst_id, - security_group_id) - return instance_ref - - def update_instance(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. - - :param context: The security context - :param instance_id: ID of the instance to update - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - updated - - :retval None - - """ - self.db.instance_update(context, instance_id, kwargs) - @defer.inlineCallbacks @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): diff --git a/nova/db/base.py b/nova/db/base.py new file mode 100644 index 000000000000..1d1e80866bdf --- /dev/null +++ b/nova/db/base.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Base class for classes that need modular database access. 
+""" + +from nova import utils +from nova import flags + +FLAGS = flags.FLAGS +flags.DEFINE_string('db_driver', 'nova.db.api', + 'driver to use for database access') + + +class Base(object): + """DB driver is injected in the init method""" + def __init__(self, db_driver=None): + if not db_driver: + db_driver = FLAGS.db_driver + self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103 diff --git a/nova/manager.py b/nova/manager.py index a6efb8732ca2..5b61f7a4cb3c 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -53,23 +53,19 @@ This module provides Manager, a base class for managers. from nova import utils from nova import flags +from nova.db import base from twisted.internet import defer FLAGS = flags.FLAGS -flags.DEFINE_string('db_driver', 'nova.db.api', - 'driver to use for volume creation') -class Manager(object): - """DB driver is injected in the init method""" +class Manager(base.Base): def __init__(self, host=None, db_driver=None): if not host: host = FLAGS.host self.host = host - if not db_driver: - db_driver = FLAGS.db_driver - self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103 + super(Manager, self).__init__(db_driver) @defer.inlineCallbacks def periodic_tasks(self, context=None): diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 71a1a4457da7..8f6f35b35ed2 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -31,6 +31,7 @@ from nova import flags from nova import test from nova import utils from nova.auth import manager +from nova.compute import api as compute_api FLAGS = flags.FLAGS @@ -43,6 +44,7 @@ class ComputeTestCase(test.TrialTestCase): self.flags(connection_type='fake', network_manager='nova.network.manager.FlatManager') self.compute = utils.import_object(FLAGS.compute_manager) + self.compute_api = compute_api.ComputeAPI() self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake') @@ -76,9 +78,9 @@ class ComputeTestCase(test.TrialTestCase): 'user_id': self.user.id, 'project_id': self.project.id} group = db.security_group_create(self.context, values) - ref = self.compute.create_instance(self.context, - security_groups=[group['id']], - **inst) + ref = self.compute_api.create_instance(self.context, + security_groups=[group['id']], + **inst) # reload to get groups instance_ref = db.instance_get(self.context, ref['id']) try: From 6e6b5325bfc5233b243ed9f8279694136d605ddf Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 1 Dec 2010 11:43:20 -0600 Subject: [PATCH 44/83] Removed a blank line. 
--- bin/nova-compute | 1 - 1 file changed, 1 deletion(-) diff --git a/bin/nova-compute b/bin/nova-compute index 1054852c4ce7..ac6378f7547f 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -43,4 +43,3 @@ if __name__ == '__main__': if __name__ == '__builtin__': application = service.Service.create() # pylint: disable=C0103 - From 93c7bbf98f0396718724cbf1d4d2f3953078776c Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 1 Dec 2010 14:18:24 -0600 Subject: [PATCH 45/83] Remove duplicate field and make OpenStack API return server.name for EC2-API-created instances --- nova/api/openstack/servers.py | 5 ++--- nova/db/sqlalchemy/models.py | 3 +-- nova/tests/api/openstack/test_servers.py | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 1d8aa2fa4dab..44e69b82cfec 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -63,7 +63,7 @@ def _entity_detail(inst): inst_dict = {} mapped_keys = dict(status='state', imageId='image_id', - flavorId='instance_type', name='server_name', id='id') + flavorId='instance_type', name='display_name', id='id') for k, v in mapped_keys.iteritems(): inst_dict[k] = inst[v] @@ -78,7 +78,7 @@ def _entity_detail(inst): def _entity_inst(inst): """ Filters all model attributes save for id and name """ - return dict(server=dict(id=inst['id'], name=inst['server_name'])) + return dict(server=dict(id=inst['id'], name=inst['display_name'])) class Controller(wsgi.Controller): @@ -213,7 +213,6 @@ class Controller(wsgi.Controller): if not image: raise Exception("Image not found") - inst['server_name'] = env['server']['name'] inst['image_id'] = image_id inst['user_id'] = user_id inst['launch_time'] = ltime diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 01b5cf350710..fe0a9a92162d 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -178,8 +178,6 @@ class Instance(BASE, NovaBase): kernel_id = Column(String(255)) ramdisk_id = Column(String(255)) - server_name = Column(String(255)) - # image_id = Column(Integer, ForeignKey('images.id'), nullable=True) # kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) # ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) @@ -212,6 +210,7 @@ class Instance(BASE, NovaBase): launched_at = Column(DateTime) terminated_at = Column(DateTime) + # User editable field for display in user-facing UIs display_name = Column(String(255)) display_description = Column(String(255)) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8cfc6c45a330..530d067606c9 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -44,7 +44,7 @@ def return_servers(context, user_id=1): def stub_instance(id, user_id=1): - return Instance(id=id, state=0, image_id=10, server_name='server%s' % id, + return Instance(id=id, state=0, image_id=10, display_name='server%s' % id, user_id=user_id) From fdf0aa30a1127eb8311a599dfdad9653ac699154 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 1 Dec 2010 14:55:42 -0600 Subject: [PATCH 46/83] Todd points out that the API doesn't require a display_name, so let's make a default. That way the OpenStack API can rest assured that its server responses will always have a name key. 
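A standalone sketch of the dict-defaulting semantics in play here (the values are made up for illustration): setdefault only fills in keys that are absent, so a later change still has to handle an explicit None.

    # dict.setdefault fills in *missing* keys only; an explicit None is kept.
    opts = {'display_name': None}
    opts.setdefault('display_name', 'Server 42')
    assert opts['display_name'] is None

    # Checking the value instead covers both the missing-key and the None case.
    if opts.get('display_name') is None:
        opts['display_name'] = 'Server 42'
    assert opts['display_name'] == 'Server 42'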
--- nova/compute/manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 50a9d316b456..0893db9fc51d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -99,6 +99,8 @@ class ComputeManager(manager.Manager): that has just been created """ + # Set sane defaults if not specified + kwargs.setdefault('display_name', "Server %s" % kwargs['internal_id']) instance_ref = self.db.instance_create(context, kwargs) inst_id = instance_ref['id'] From f53f5880c08994d04a552a41ce6f88dfbd867946 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 1 Dec 2010 15:53:27 -0600 Subject: [PATCH 47/83] Oops, internal_id isn't available until after a save. This code saves twice; if I moved it into the DB layer we could do it in one save. However, we're moving to one sqlite db per compute worker, so I'd rather have two saves in order to keep the logic in the right layer. --- nova/compute/manager.py | 8 ++++++-- nova/db/sqlalchemy/api.py | 6 ++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0893db9fc51d..6fc5c51866f6 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -99,10 +99,14 @@ class ComputeManager(manager.Manager): that has just been created """ - # Set sane defaults if not specified - kwargs.setdefault('display_name', "Server %s" % kwargs['internal_id']) instance_ref = self.db.instance_create(context, kwargs) inst_id = instance_ref['id'] + # Set sane defaults if not specified + if 'display_name' not in kwargs: + display_name = "Server %s" % instance_ref['internal_id'] + instance_ref['display_name'] = display_name + self.db.instance_update(context, inst_id, + { 'display_name': display_name }) elevated = context.elevated() if not security_groups: diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index afa55fc03a11..dd9649054534 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -530,6 +530,12 @@ def fixed_ip_update(context, address, values): #functions between the two of them as well. @require_context def instance_create(context, values): + """Create a new Instance record in the database. + + context - request context object + values - dict containing column values. + 'internal_id' is auto-generated and should not be specified. 
+ """ instance_ref = models.Instance() instance_ref.update(values) From 8af2b1c97903f11034a95894a23bb7e77f573aa6 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 1 Dec 2010 16:04:04 -0600 Subject: [PATCH 48/83] Going for a record commits per line changes ratio --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6fc5c51866f6..e826bdaa2b52 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -106,7 +106,7 @@ class ComputeManager(manager.Manager): display_name = "Server %s" % instance_ref['internal_id'] instance_ref['display_name'] = display_name self.db.instance_update(context, inst_id, - { 'display_name': display_name }) + {'display_name': display_name}) elevated = context.elevated() if not security_groups: From fd44f9d2ec1d101960642a68d45bffc9c37f0d7f Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 2 Dec 2010 12:13:56 +0000 Subject: [PATCH 49/83] moved flags into xenapi/novadeps.py --- nova/virt/xenapi/novadeps.py | 40 ++++++++++++++++++++++++++++++++++++ nova/virt/xenapi_conn.py | 31 ++++++---------------------- 2 files changed, 46 insertions(+), 25 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index ba62468fbccc..985998486d47 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -33,6 +33,46 @@ XENAPI_POWER_STATE = { 'Crashed': power_state.CRASHED} +flags.DEFINE_string('xenapi_connection_url', + None, + 'URL for connection to XenServer/Xen Cloud Platform.' + ' Required if connection_type=xenapi.') +flags.DEFINE_string('xenapi_connection_username', + 'root', + 'Username for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') +flags.DEFINE_string('xenapi_connection_password', + None, + 'Password for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') +flags.DEFINE_float('xenapi_task_poll_interval', + 0.5, + 'The interval used for polling of remote tasks ' + '(Async.VM.start, etc). Used only if ' + 'connection_type=xenapi.') + + +class Configuration(object): + def __init__(self): + self._flags = flags.FLAGS + + @property + def xenapi_connection_url(self): + return self._flags.xenapi_connection_url + + @property + def xenapi_connection_username(self): + return self._flags.xenapi_connection_username + + @property + def xenapi_connection_password(self): + return self._flags.xenapi_connection_password + + @property + def xenapi_task_poll_interval(self): + return self._flags.xenapi_task_poll_interval + + class Instance(object): @classmethod diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 0a73b47741f0..51091ab19e9a 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -53,33 +53,14 @@ import xmlrpclib from twisted.internet import defer from twisted.internet import reactor -from nova import flags from nova import utils from xenapi.vmops import VMOps from xenapi.volumeops import VolumeOps +from xenapi.novadeps import Configuration XenAPI = None - - -FLAGS = flags.FLAGS -flags.DEFINE_string('xenapi_connection_url', - None, - 'URL for connection to XenServer/Xen Cloud Platform.' - ' Required if connection_type=xenapi.') -flags.DEFINE_string('xenapi_connection_username', - 'root', - 'Username for connection to XenServer/Xen Cloud Platform.' 
- ' Used only if connection_type=xenapi.') -flags.DEFINE_string('xenapi_connection_password', - None, - 'Password for connection to XenServer/Xen Cloud Platform.' - ' Used only if connection_type=xenapi.') -flags.DEFINE_float('xenapi_task_poll_interval', - 0.5, - 'The interval used for polling of remote tasks ' - '(Async.VM.start, etc). Used only if ' - 'connection_type=xenapi.') +Config = Configuration() def get_connection(_): @@ -90,9 +71,9 @@ def get_connection(_): global XenAPI if XenAPI is None: XenAPI = __import__('XenAPI') - url = FLAGS.xenapi_connection_url - username = FLAGS.xenapi_connection_username - password = FLAGS.xenapi_connection_password + url = Config.xenapi_connection_url + username = Config.xenapi_connection_username + password = Config.xenapi_connection_password if not url or password is None: raise Exception('Must specify xenapi_connection_url, ' 'xenapi_connection_username (optionally), and ' @@ -177,7 +158,7 @@ class XenAPISession(object): #logging.debug('Polling task %s...', task) status = self._session.xenapi.task.get_status(task) if status == 'pending': - reactor.callLater(FLAGS.xenapi_task_poll_interval, + reactor.callLater(Config.xenapi_task_poll_interval(), self._poll_task, task, deferred) elif status == 'success': result = self._session.xenapi.task.get_result(task) From b684bc26fc7c7f41cf90e0294af35b2bda243733 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 2 Dec 2010 12:36:05 +0000 Subject: [PATCH 50/83] typo fix --- nova/virt/xenapi_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 51091ab19e9a..948fade7e9d2 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -158,7 +158,7 @@ class XenAPISession(object): #logging.debug('Polling task %s...', task) status = self._session.xenapi.task.get_status(task) if status == 'pending': - reactor.callLater(Config.xenapi_task_poll_interval(), + reactor.callLater(Config.xenapi_task_poll_interval, self._poll_task, task, deferred) elif status == 'success': result = self._session.xenapi.task.get_result(task) From 1e050bb4a8eeb65a7ac25a9fb90493567b5b07f4 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 2 Dec 2010 15:18:45 +0100 Subject: [PATCH 51/83] Add a helpful error message to nova-manage in case of NoMoreNetworks. This is one of the most common problems people have, and the solution is not currently easily discoverable. This should address that. --- bin/nova-manage | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index eb7c6b87b533..62eec8353ff3 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -359,9 +359,14 @@ class ProjectCommands(object): def zipfile(self, project_id, user_id, filename='nova.zip'): """Exports credentials for project to a zip file arguments: project_id user_id [filename='nova.zip]""" - zip_file = self.manager.get_credentials(user_id, project_id) - with open(filename, 'w') as f: - f.write(zip_file) + try: + zip_file = self.manager.get_credentials(user_id, project_id) + with open(filename, 'w') as f: + f.write(zip_file) + except db.api.NoMoreNetworks: + print ('No more networks available. 
If this is a new ' + 'installation, you need\nto call something like this:\n\n' + ' nova-manage network create 10.0.0.0/8 10 64\n\n') class FloatingIpCommands(object): From 3af6da1fa5a38c8238ea45a7b03a6e3fbb78fe5b Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 2 Dec 2010 10:08:56 -0600 Subject: [PATCH 52/83] Default Instance.display_name to a value even when None is explicitly passed in. --- nova/compute/manager.py | 2 +- nova/tests/compute_unittest.py | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index e826bdaa2b52..c4a90e6043fa 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -102,7 +102,7 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_create(context, kwargs) inst_id = instance_ref['id'] # Set sane defaults if not specified - if 'display_name' not in kwargs: + if kwargs.get('display_name') is None: display_name = "Server %s" % instance_ref['internal_id'] instance_ref['display_name'] = display_name self.db.instance_update(context, inst_id, diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 71a1a4457da7..85992b48cd9d 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -66,6 +66,16 @@ class ComputeTestCase(test.TrialTestCase): inst['ami_launch_index'] = 0 return db.instance_create(self.context, inst)['id'] + def test_create_instance_defaults_display_name(self): + """Verify that an instance cannot be created without a display_name.""" + cases = [dict(), dict(display_name=None)] + for instance in cases: + ref = self.compute.create_instance(self.context, None, **instance) + try: + self.assertNotEqual(ref.display_name, None) + finally: + db.instance_destroy(self.context, ref['id']) + def test_create_instance_associates_security_groups(self): """Make sure create_instance associates security groups""" inst = {} From 26571952bb8f1015b11d6b9514d232ad8a20d837 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 2 Dec 2010 10:21:43 -0800 Subject: [PATCH 53/83] Moved reboot/rescue methods into nova.compute.api. --- nova/api/cloud.py | 58 ----------------------------------- nova/api/ec2/cloud.py | 7 ++--- nova/api/openstack/servers.py | 3 +- nova/compute/api.py | 27 ++++++++++++++++ 4 files changed, 31 insertions(+), 64 deletions(-) delete mode 100644 nova/api/cloud.py diff --git a/nova/api/cloud.py b/nova/api/cloud.py deleted file mode 100644 index b8f15019f45a..000000000000 --- a/nova/api/cloud.py +++ /dev/null @@ -1,58 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Methods for API calls to control instances via AMQP. 
-""" - - -from nova import db -from nova import flags -from nova import rpc - -FLAGS = flags.FLAGS - - -def reboot(instance_id, context=None): - """Reboot the given instance.""" - instance_ref = db.instance_get_by_internal_id(context, instance_id) - host = instance_ref['host'] - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "reboot_instance", - "args": {"instance_id": instance_ref['id']}}) - - -def rescue(instance_id, context): - """Rescue the given instance.""" - instance_ref = db.instance_get_by_internal_id(context, instance_id) - host = instance_ref['host'] - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "rescue_instance", - "args": {"instance_id": instance_ref['id']}}) - - -def unrescue(instance_id, context): - """Unrescue the given instance.""" - instance_ref = db.instance_get_by_internal_id(context, instance_id) - host = instance_ref['host'] - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unrescue_instance", - "args": {"instance_id": instance_ref['id']}}) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index e50906ae1799..161d2d038dd6 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -41,7 +41,6 @@ from nova import rpc from nova import utils from nova.compute import api as compute_api from nova.compute import instance_types -from nova.api import cloud from nova.image.s3 import S3ImageService @@ -834,19 +833,19 @@ class CloudController(object): """instance_id is a list of instance ids""" for ec2_id in instance_id: internal_id = ec2_id_to_internal_id(ec2_id) - cloud.reboot(internal_id, context=context) + self.compute_api.reboot(context, internal_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" internal_id = ec2_id_to_internal_id(instance_id) - cloud.rescue(internal_id, context=context) + self.compute_api.rescue(context, internal_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" internal_id = ec2_id_to_internal_id(instance_id) - cloud.unrescue(internal_id, context=context) + self.compute_api.unrescue(context, internal_id) return True def update_instance(self, context, ec2_id, **kwargs): diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 11170bbf5eee..d34dd78fb0ae 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -25,7 +25,6 @@ from nova import rpc from nova import utils from nova import wsgi from nova import context -from nova.api import cloud from nova.api.openstack import faults from nova.compute import api as compute_api from nova.compute import instance_types @@ -191,7 +190,7 @@ class Controller(wsgi.Controller): inst_ref = self.db.instance_get_by_internal_id(ctxt, int(id)) if not inst_ref or (inst_ref and not inst_ref.user_id == user_id): return faults.Fault(exc.HTTPUnprocessableEntity()) - cloud.reboot(id) + self.compute_api.reboot(ctxt, id) def _get_network_topic(self, context): """Retrieves the network host for a project""" diff --git a/nova/compute/api.py b/nova/compute/api.py index 929342a1e979..da01ca61ad60 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -210,3 +210,30 @@ class ComputeAPI(base.Base): """ self.db.instance_update(context, instance_id, kwargs) + + def reboot(self, context, instance_id): + """Reboot the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) 
+ host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "reboot_instance", + "args": {"instance_id": instance['id']}}) + + def rescue(self, context, instance_id): + """Rescue the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "rescue_instance", + "args": {"instance_id": instance['id']}}) + + def unrescue(self, context, instance_id): + """Unrescue the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unrescue_instance", + "args": {"instance_id": instance['id']}}) From 111285b9bcbee26e3f49b92dcc68355e251007a7 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 2 Dec 2010 10:53:32 -0800 Subject: [PATCH 54/83] Added test files to be ignored. --- .bzrignore | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.bzrignore b/.bzrignore index ab099d3e3297..82db46fa226e 100644 --- a/.bzrignore +++ b/.bzrignore @@ -1,3 +1,13 @@ run_tests.err.log .nova-venv ChangeLog +_trial_temp +keys +networks +nova.sqlite +CA/cacert.pem +CA/index.txt* +CA/openssl.cnf +CA/serial* +CA/newcerts/*.pem +CA/private/cakey.pem From 9d5e1b52f837047aac55d08a664a35be7cc5b8ef Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 2 Dec 2010 12:58:13 -0600 Subject: [PATCH 55/83] Correctly translate instance ids to internal_ids in some spots we neglected. And do some pylint cleanup. --- nova/api/openstack/servers.py | 12 ++++++------ nova/compute/manager.py | 10 ++++------ nova/virt/xenapi.py | 2 -- 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 11170bbf5eee..f85aabbfaa60 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,8 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import time - import webob from webob import exc @@ -64,7 +62,7 @@ def _entity_detail(inst): inst_dict = {} mapped_keys = dict(status='state', imageId='image_id', - flavorId='instance_type', name='display_name', id='id') + flavorId='instance_type', name='display_name', id='internal_id') for k, v in mapped_keys.iteritems(): inst_dict[k] = inst[v] @@ -79,7 +77,7 @@ def _entity_detail(inst): def _entity_inst(inst): """ Filters all model attributes save for id and name """ - return dict(server=dict(id=inst['id'], name=inst['display_name'])) + return dict(server=dict(id=inst['internal_id'], name=inst['display_name'])) class Controller(wsgi.Controller): @@ -89,7 +87,7 @@ class Controller(wsgi.Controller): 'application/xml': { "attributes": { "server": ["id", "imageId", "name", "flavorId", "hostId", - "status", "progress", "progress"]}}} + "status", "progress"]}}} def __init__(self, db_driver=None): if not db_driver: @@ -176,7 +174,7 @@ class Controller(wsgi.Controller): self.db_driver.instance_update(ctxt, int(id), _filter_params(inst_dict['server'])) - return faults.Fault(exc.HTTPNoContent()) + return exc.HTTPNoContent() def action(self, req, id): """ multi-purpose method used to reboot, rebuild, and @@ -191,6 +189,8 @@ class Controller(wsgi.Controller): inst_ref = self.db.instance_get_by_internal_id(ctxt, int(id)) if not inst_ref or (inst_ref and not inst_ref.user_id == user_id): return faults.Fault(exc.HTTPUnprocessableEntity()) + #TODO(gundlach): pass reboot_type, support soft reboot in + #virt driver cloud.reboot(id) def _get_network_topic(self, context): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b5eb23b2400c..dd8d41129cc5 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -22,8 +22,8 @@ Handles all processes relating to instances (guest vms). The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to creating instances. It is responsible for building a disk image, launching it via the underlying virtualization driver, -responding to calls to check it state, attaching persistent as well as -termination. +responding to calls to check its state, attaching persistent storage, and +terminating it. 
**Related Flags** @@ -39,7 +39,6 @@ import logging from twisted.internet import defer -from nova import db from nova import exception from nova import flags from nova import manager @@ -50,10 +49,11 @@ FLAGS = flags.FLAGS flags.DEFINE_string('instances_path', '$state_path/instances', 'where instances are stored on disk') flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', - 'Driver to use for volume creation') + 'Driver to use for controlling virtualization') class ComputeManager(manager.Manager): + """Manages the running instances from creation to destruction.""" def __init__(self, compute_driver=None, *args, **kwargs): @@ -93,7 +93,6 @@ class ComputeManager(manager.Manager): if instance_ref['name'] in self.driver.list_instances(): raise exception.Error("Instance has already been created") logging.debug("instance %s: starting...", instance_id) - project_id = instance_ref['project_id'] self.network_manager.setup_compute_network(context, instance_id) self.db.instance_update(context, instance_id, @@ -135,7 +134,6 @@ class ComputeManager(manager.Manager): self.db.instance_destroy(context, instance_id) raise exception.Error('trying to destroy already destroyed' ' instance: %s' % instance_id) - yield self.driver.destroy(instance_ref) # TODO(ja): should we keep it in a terminated state for a bit? diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 3169562a5318..de3d6858296b 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -52,11 +52,9 @@ import xmlrpclib from twisted.internet import defer from twisted.internet import reactor -from twisted.internet import task from nova import db from nova import flags -from nova import process from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types From 7bcbc2a6e1b907886e03e5254dcd0a726ccdcd9d Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 2 Dec 2010 13:29:37 -0600 Subject: [PATCH 56/83] Oops, update 'display_name', not 'name'. And un-extract-method. 
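A minimal, standalone sketch of the field mapping this inlines; the request body below is a made-up example, and only the two key names come from the diff.

    # Translate the OpenStack API request fields onto instance columns.
    body = {'server': {'name': 'db-01', 'adminPass': 's3cret'}}

    update_dict = {}
    if 'adminPass' in body['server']:
        update_dict['admin_pass'] = body['server']['adminPass']
    if 'name' in body['server']:
        update_dict['display_name'] = body['server']['name']

    assert update_dict == {'admin_pass': 's3cret', 'display_name': 'db-01'}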
--- nova/api/openstack/__init__.py | 2 ++ nova/api/openstack/servers.py | 20 +++++++------------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 1dd3ba770e5b..4ca108c4e769 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -25,6 +25,7 @@ import time import logging import routes +import traceback import webob.dec import webob.exc import webob @@ -61,6 +62,7 @@ class API(wsgi.Middleware): return req.get_response(self.application) except Exception as ex: logging.warn("Caught error: %s" % str(ex)) + logging.debug(traceback.format_exc()) exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) return faults.Fault(exc) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index f85aabbfaa60..a2a637def893 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -34,16 +34,6 @@ import nova.image.service FLAGS = flags.FLAGS -def _filter_params(inst_dict): - """ Extracts all updatable parameters for a server update request """ - keys = dict(name='name', admin_pass='adminPass') - new_attrs = {} - for k, v in keys.items(): - if v in inst_dict: - new_attrs[k] = inst_dict[v] - return new_attrs - - def _entity_list(entities): """ Coerces a list of servers into proper dictionary format """ return dict(servers=entities) @@ -171,9 +161,13 @@ class Controller(wsgi.Controller): if not instance or instance.user_id != user_id: return faults.Fault(exc.HTTPNotFound()) - self.db_driver.instance_update(ctxt, - int(id), - _filter_params(inst_dict['server'])) + update_dict = {} + if 'adminPass' in inst_dict['server']: + update_dict['admin_pass'] = inst_dict['server']['adminPass'] + if 'name' in inst_dict['server']: + update_dict['display_name'] = inst_dict['server']['name'] + + self.db_driver.instance_update(ctxt, instance['id'], update_dict) return exc.HTTPNoContent() def action(self, req, id): From a8df0a7d6c2de55d7906fa311f79887ccf575508 Mon Sep 17 00:00:00 2001 From: Anne Gentle Date: Thu, 2 Dec 2010 13:48:39 -0600 Subject: [PATCH 57/83] Fixing single node install doc --- doc/source/adminguide/multi.node.install.rst | 13 +++++------- doc/source/adminguide/single.node.install.rst | 20 +++++++++++++++---- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst index dcceb539bf9c..1eed30c5b3b2 100644 --- a/doc/source/adminguide/multi.node.install.rst +++ b/doc/source/adminguide/multi.node.install.rst @@ -19,7 +19,7 @@ Installing Nova on Multiple Servers =================================== When you move beyond evaluating the technology and into building an actual -production environemnt, you will need to know how to configure your datacenter +production environment, you will need to know how to configure your datacenter and how to deploy components across your clusters. This guide should help you through that process. @@ -161,7 +161,7 @@ Step 3 Setup the sql db GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; SET PASSWORD FOR 'root'@'%' = PASSWORD('nova'); -7. branch and install Nova +7. Branch and install Nova :: @@ -186,9 +186,7 @@ Step 4 Setup Nova environment Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). 
If this is not the case you will need to manually edit the sql db 'networks' table.o. -On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. We ended up doing this manually, (update query fired directly in the DB). Is there a better way to mark a network as bridged? - -Update: This has been resolved w.e.f 27/10. network is marked as bridged automatically based on the type of n/w manager selected. +On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. More networking details to create a network bridge for flat network ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -233,7 +231,6 @@ unzip them in your home directory, and add them to your environment:: echo ". creds/novarc" >> ~/.bashrc ~/.bashrc - Step 6 Restart all relevant services ------------------------------------ @@ -249,8 +246,8 @@ Restart relevant nova services:: .. todo:: do we still need the content below? -Bare-metal Provisioning ------------------------ +Bare-metal Provisioning Notes +----------------------------- To install the base operating system you can use PXE booting. diff --git a/doc/source/adminguide/single.node.install.rst b/doc/source/adminguide/single.node.install.rst index 27597962aae6..f6b2290bcffa 100644 --- a/doc/source/adminguide/single.node.install.rst +++ b/doc/source/adminguide/single.node.install.rst @@ -63,8 +63,20 @@ You see an access key and a secret key export, such as these made-up ones::: export EC2_ACCESS_KEY=4e6498a2-blah-blah-blah-17d1333t97fd export EC2_SECRET_KEY=0a520304-blah-blah-blah-340sp34k05bbe9a7 +Step 5: Create the network +-------------------------- -Step 5: Create a project with the user you created +Type or copy/paste in the following line to create a network prior to creating a project. + +:: + + sudo nova-manage network create 10.0.0.0/8 1 64 + +For this command, the IP address is the cidr notation for your netmask, such as 192.168.1.0/24. The value 1 is the total number of networks you want made, and the 64 value is the total number of ips in all networks. + +After running this command, entries are made in the 'networks' and 'fixed_ips' table in the database. + +Step 6: Create a project with the user you created -------------------------------------------------- Type or copy/paste in the following line to create a project named IRT (for Ice Road Truckers, of course) with the newly-created user named anne. @@ -94,7 +106,7 @@ Type or copy/paste in the following line to create a project named IRT (for Ice Data Base Updated -Step 6: Unzip the nova.zip +Step 7: Unzip the nova.zip -------------------------- You should have a nova.zip file in your current working directory. Unzip it with this command: @@ -116,7 +128,7 @@ You'll see these files extract. extracting: cacert.pem -Step 7: Source the rc file +Step 8: Source the rc file -------------------------- Type or copy/paste the following to source the novarc file in your current working directory. @@ -125,7 +137,7 @@ Type or copy/paste the following to source the novarc file in your current worki . 
novarc -Step 8: Pat yourself on the back :) +Step 9: Pat yourself on the back :) ----------------------------------- Congratulations, your cloud is up and running, you’ve created an admin user, retrieved the user's credentials and put them in your environment. From 84b130f5fcc02964bc38423bb0153db9cc89e520 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 2 Dec 2010 14:14:31 -0600 Subject: [PATCH 58/83] Update tests to use proper id --- nova/tests/api/openstack/test_servers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 2eee4e506239..8060995ad049 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -48,8 +48,8 @@ def return_security_group(context, instance_id, security_group_id): def stub_instance(id, user_id=1): - return Instance(id=id, state=0, image_id=10, display_name='server%s' % id, - user_id=user_id) + return Instance(id=id+123456, state=0, image_id=10, user_id=user_id, + display_name='server%s' % id, internal_id=id) class ServersTest(unittest.TestCase): From 8be00510243918a67558b60557e7261e4649e94e Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 2 Dec 2010 14:17:41 -0600 Subject: [PATCH 59/83] Use newfangled compute_api --- nova/api/openstack/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index a2a637def893..e7f765c028df 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -167,7 +167,7 @@ class Controller(wsgi.Controller): if 'name' in inst_dict['server']: update_dict['display_name'] = inst_dict['server']['name'] - self.db_driver.instance_update(ctxt, instance['id'], update_dict) + self.compute_api.update_instance(ctxt, instance['id'], update_dict) return exc.HTTPNoContent() def action(self, req, id): From ad8577fdf07cc6ef8734962c93c85cb03afe23a7 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 2 Dec 2010 15:33:43 -0600 Subject: [PATCH 60/83] pep8 --- nova/tests/api/openstack/test_servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8060995ad049..44ac8f342734 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -48,7 +48,7 @@ def return_security_group(context, instance_id, security_group_id): def stub_instance(id, user_id=1): - return Instance(id=id+123456, state=0, image_id=10, user_id=user_id, + return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id, display_name='server%s' % id, internal_id=id) From 47b47bc4ae34f90a6d1c59718b5ee759fb7c7327 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 2 Dec 2010 15:26:14 -0800 Subject: [PATCH 61/83] Pushed terminate instance and network manager/topic methods into network.compute.api. 
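Roughly, the cast pattern being centralized in ComputeAPI looks like the sketch below; it is simplified, and the db/rpc arguments are stand-ins injected for illustration rather than the real modules.

    def delete_instance(context, db, rpc, compute_topic, instance):
        # Mark the instance as terminating, then hand the actual teardown to
        # the compute worker that owns it, via its per-host message queue.
        db.instance_update(context, instance['id'],
                           {'state_description': 'terminating', 'state': 0})
        host = instance['host']
        if host:
            topic = db.queue_get_for(context, compute_topic, host)
            rpc.cast(context, topic,
                     {"method": "terminate_instance",
                      "args": {"instance_id": instance['id']}})
        else:
            # Never scheduled onto a host, so there is nothing running to stop.
            db.instance_destroy(context, instance['id'])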
--- nova/api/ec2/cloud.py | 65 ++----------------- nova/api/openstack/servers.py | 26 ++------ nova/compute/api.py | 82 +++++++++++++++++++++--- nova/tests/api/openstack/test_servers.py | 13 ++++ 4 files changed, 98 insertions(+), 88 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 161d2d038dd6..7978e08a0477 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -94,7 +94,7 @@ class CloudController(object): """ def __init__(self): self.network_manager = utils.import_object(FLAGS.network_manager) - self.compute_api = compute_api.ComputeAPI() + self.compute_api = compute_api.ComputeAPI(self.network_manager) self.image_service = S3ImageService() self.setup() @@ -752,7 +752,6 @@ class CloudController(object): instance_types.get_by_type(kwargs.get('instance_type', None)), self.image_service, kwargs['image_id'], - self._get_network_topic(context), min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), @@ -768,65 +767,11 @@ class CloudController(object): def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. - - instance_id is a kwarg so its name cannot be modified. - """ - ec2_id_list = instance_id + instance_id is a kwarg so its name cannot be modified.""" logging.debug("Going to start terminating instances") - for id_str in ec2_id_list: - internal_id = ec2_id_to_internal_id(id_str) - logging.debug("Going to try and terminate %s" % id_str) - try: - instance_ref = db.instance_get_by_internal_id(context, - internal_id) - except exception.NotFound: - logging.warning("Instance %s was not found during terminate", - id_str) - continue - - if (instance_ref['state_description'] == 'terminating'): - logging.warning("Instance %s is already being terminated", - id_str) - continue - now = datetime.datetime.utcnow() - self.compute_api.update_instance(context, - instance_ref['id'], - state_description='terminating', - state=0, - terminated_at=now) - - # FIXME(ja): where should network deallocate occur? - address = db.instance_get_floating_address(context, - instance_ref['id']) - if address: - logging.debug("Disassociating address %s" % address) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. Perhaps in the scheduler? - network_topic = self._get_network_topic(context) - rpc.cast(context, - network_topic, - {"method": "disassociate_floating_ip", - "args": {"floating_address": address}}) - - address = db.instance_get_fixed_address(context, - instance_ref['id']) - if address: - logging.debug("Deallocating address %s" % address) - # NOTE(vish): Currently, nothing needs to be done on the - # network node until release. If this changes, - # we will need to cast here. 
- self.network_manager.deallocate_fixed_ip(context.elevated(), - address) - - host = instance_ref['host'] - if host: - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "terminate_instance", - "args": {"instance_id": instance_ref['id']}}) - else: - db.instance_destroy(context, instance_ref['id']) + for ec2_id in instance_id: + internal_id = ec2_id_to_internal_id(ec2_id) + self.compute_api.delete_instance(context, internal_id) return True def reboot_instances(self, context, instance_id, **kwargs): diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index d34dd78fb0ae..1d93f783cf44 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -20,11 +20,12 @@ import time import webob from webob import exc +from nova import context +from nova import exception from nova import flags from nova import rpc from nova import utils from nova import wsgi -from nova import context from nova.api.openstack import faults from nova.compute import api as compute_api from nova.compute import instance_types @@ -94,7 +95,6 @@ class Controller(wsgi.Controller): if not db_driver: db_driver = FLAGS.db_driver self.db_driver = utils.import_object(db_driver) - self.network_manager = utils.import_object(FLAGS.network_manager) self.compute_api = compute_api.ComputeAPI() super(Controller, self).__init__() @@ -132,11 +132,11 @@ class Controller(wsgi.Controller): """ Destroys a server """ user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) - instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) - if instance and instance['user_id'] == user_id: - self.db_driver.instance_destroy(ctxt, id) - return faults.Fault(exc.HTTPAccepted()) - return faults.Fault(exc.HTTPNotFound()) + try: + self.compute_api.delete_instance(ctxt, int(id)) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return faults.Fault(exc.HTTPAccepted()) def create(self, req): """ Creates a new server for a given user """ @@ -151,7 +151,6 @@ class Controller(wsgi.Controller): instance_types.get_by_flavor_id(env['server']['flavorId']), utils.import_object(FLAGS.image_service), env['server']['imageId'], - self._get_network_topic(ctxt), name=env['server']['name'], description=env['server']['name'], key_name=key_pair['name'], @@ -191,14 +190,3 @@ class Controller(wsgi.Controller): if not inst_ref or (inst_ref and not inst_ref.user_id == user_id): return faults.Fault(exc.HTTPUnprocessableEntity()) self.compute_api.reboot(ctxt, id) - - def _get_network_topic(self, context): - """Retrieves the network host for a project""" - network_ref = self.network_manager.get_network(context) - host = network_ref['host'] - if not host: - host = rpc.call(context, - FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) - return self.db_driver.queue_get_for(context, FLAGS.network_topic, host) diff --git a/nova/compute/api.py b/nova/compute/api.py index da01ca61ad60..457d6e27fcff 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -20,6 +20,7 @@ Handles all API requests relating to instances (guest vms). 
""" +import datetime import logging import time @@ -43,17 +44,17 @@ def generate_default_hostname(internal_id): class ComputeAPI(base.Base): """API for interacting with the compute manager.""" - def __init__(self, **kwargs): - self.network_manager = utils.import_object(FLAGS.network_manager) + def __init__(self, network_manager=None, **kwargs): + if not network_manager: + network_manager = utils.import_object(FLAGS.network_manager) + self.network_manager = network_manager super(ComputeAPI, self).__init__(**kwargs) - # TODO(eday): network_topic arg should go away once we push network - # allocation into the scheduler or compute worker. def create_instances(self, context, instance_type, image_service, image_id, - network_topic, min_count=1, max_count=1, - kernel_id=None, ramdisk_id=None, name='', - description='', user_data='', key_name=None, - key_data=None, security_group='default', + min_count=1, max_count=1, kernel_id=None, + ramdisk_id=None, name='', description='', + user_data='', key_name=None, key_data=None, + security_group='default', generate_hostname=generate_default_hostname): """Create the number of instances requested if quote and other arguments check out ok.""" @@ -139,7 +140,7 @@ class ComputeAPI(base.Base): instance_id, is_vpn) rpc.cast(elevated, - network_topic, + self._get_network_topic(context), {"method": "setup_fixed_ip", "args": {"address": address}}) @@ -211,6 +212,58 @@ class ComputeAPI(base.Base): """ self.db.instance_update(context, instance_id, kwargs) + def delete_instance(self, context, instance_id): + logging.debug("Going to try and terminate %d" % instance_id) + try: + instance = self.db.instance_get_by_internal_id(context, + instance_id) + except exception.NotFound as e: + logging.warning("Instance %d was not found during terminate", + instance_id) + raise e + + if (instance['state_description'] == 'terminating'): + logging.warning("Instance %d is already being terminated", + instance_id) + return + + self.update_instance(context, + instance['id'], + state_description='terminating', + state=0, + terminated_at=datetime.datetime.utcnow()) + + # FIXME(ja): where should network deallocate occur? + address = self.db.instance_get_floating_address(context, + instance['id']) + if address: + logging.debug("Disassociating address %s" % address) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. Perhaps in the scheduler? + rpc.cast(context, + self._get_network_topic(context), + {"method": "disassociate_floating_ip", + "args": {"floating_address": address}}) + + address = self.db.instance_get_fixed_address(context, instance['id']) + if address: + logging.debug("Deallocating address %s" % address) + # NOTE(vish): Currently, nothing needs to be done on the + # network node until release. If this changes, + # we will need to cast here. 
+ self.network_manager.deallocate_fixed_ip(context.elevated(), + address) + + host = instance['host'] + if host: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "terminate_instance", + "args": {"instance_id": instance['id']}}) + else: + self.db.instance_destroy(context, instance['id']) + def reboot(self, context, instance_id): """Reboot the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -237,3 +290,14 @@ class ComputeAPI(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "unrescue_instance", "args": {"instance_id": instance['id']}}) + + def _get_network_topic(self, context): + """Retrieves the network host for a project""" + network_ref = self.network_manager.get_network(context) + host = network_ref['host'] + if not host: + host = rpc.call(context, + FLAGS.network_topic, + {"method": "set_network_host", + "args": {"network_id": network_ref['id']}}) + return self.db.queue_get_for(context, FLAGS.network_topic, host) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 2eee4e506239..aebb3d1b545b 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -47,6 +47,14 @@ def return_security_group(context, instance_id, security_group_id): pass +def instance_update(context, instance_id, kwargs): + pass + + +def instance_address(context, instance_id): + return None + + def stub_instance(id, user_id=1): return Instance(id=id, state=0, image_id=10, display_name='server%s' % id, user_id=user_id) @@ -69,6 +77,11 @@ class ServersTest(unittest.TestCase): return_servers) self.stubs.Set(nova.db.api, 'instance_add_security_group', return_security_group) + self.stubs.Set(nova.db.api, 'instance_update', instance_update) + self.stubs.Set(nova.db.api, 'instance_get_fixed_address', + instance_address) + self.stubs.Set(nova.db.api, 'instance_get_floating_address', + instance_address) def tearDown(self): self.stubs.UnsetAll() From 108bab90cb70798151b8e6a09d2176a3eb120380 Mon Sep 17 00:00:00 2001 From: Ryan Lucio Date: Thu, 2 Dec 2010 17:01:44 -0800 Subject: [PATCH 62/83] Updated sqlalchemy model to make the internal_id column of the instances table as unsigned integer --- nova/db/sqlalchemy/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index fe0a9a92162d..18ba80caf6cc 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -27,6 +27,7 @@ from sqlalchemy import ForeignKey, DateTime, Boolean, Text from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.schema import ForeignKeyConstraint +from sqlalchemy.databases import mysql from nova.db.sqlalchemy.session import get_session @@ -155,7 +156,7 @@ class Instance(BASE, NovaBase): """Represents a guest vm.""" __tablename__ = 'instances' id = Column(Integer, primary_key=True) - internal_id = Column(Integer, unique=True) + internal_id = Column(mysql.MSInteger(unsigned=True), unique=True) admin_pass = Column(String(255)) From 4203aa1060e5a97bed86d2e201c4c2443ef7e042 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Fri, 3 Dec 2010 12:21:18 -0800 Subject: [PATCH 63/83] Finished cleaning up the openstack servers API, it no longer touches the database directly. Also cleaned up similar things in ec2 API and refactored a couple methods in nova.compute.api to accomodate this work. 
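A condensed sketch of the layering this enforces: the servers controller only talks to a ComputeAPI object and never imports the db layer. The stub classes below are illustrative, not the real implementations.

    class StubComputeAPI(object):
        # Stand-in for nova.compute.api.ComputeAPI.
        def __init__(self, instances):
            self._instances = instances

        def get_instances(self, context, project_id=None):
            return [i for i in self._instances
                    if project_id is None or i['project_id'] == project_id]


    class ServersController(object):
        # The controller keeps a ComputeAPI reference and no db driver.
        def __init__(self, compute_api):
            self.compute_api = compute_api

        def index(self, context):
            servers = [{'id': i['internal_id'], 'name': i['display_name']}
                       for i in self.compute_api.get_instances(context)]
            return {'servers': servers}


    api = StubComputeAPI([{'internal_id': 7, 'display_name': 'server7',
                           'project_id': 'fake'}])
    print(ServersController(api).index(context=None))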
--- nova/api/ec2/cloud.py | 25 +++--- nova/api/openstack/servers.py | 48 +++++------- nova/auth/manager.py | 4 + nova/compute/api.py | 96 +++++++++++------------- nova/db/sqlalchemy/api.py | 1 + nova/flags.py | 2 +- nova/tests/api/openstack/fakes.py | 3 +- nova/tests/api/openstack/test_servers.py | 10 +-- nova/tests/compute_unittest.py | 24 +++--- 9 files changed, 94 insertions(+), 119 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 7978e08a0477..4eef5e1efb79 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -41,7 +41,6 @@ from nova import rpc from nova import utils from nova.compute import api as compute_api from nova.compute import instance_types -from nova.image.s3 import S3ImageService FLAGS = flags.FLAGS @@ -94,8 +93,9 @@ class CloudController(object): """ def __init__(self): self.network_manager = utils.import_object(FLAGS.network_manager) - self.compute_api = compute_api.ComputeAPI(self.network_manager) - self.image_service = S3ImageService() + self.image_service = utils.import_object(FLAGS.image_service) + self.compute_api = compute_api.ComputeAPI(self.network_manager, + self.image_service) self.setup() def __str__(self): @@ -119,7 +119,7 @@ class CloudController(object): def _get_mpi_data(self, context, project_id): result = {} - for instance in db.instance_get_all_by_project(context, project_id): + for instance in self.compute_api.get_instances(context, project_id): if instance['fixed_ip']: line = '%s slots=%d' % (instance['fixed_ip']['address'], instance['vcpus']) @@ -438,7 +438,7 @@ class CloudController(object): # instance_id is passed in as a list of instances ec2_id = instance_id[0] internal_id = ec2_id_to_internal_id(ec2_id) - instance_ref = db.instance_get_by_internal_id(context, internal_id) + instance_ref = self.compute_api.get_instance(context, internal_id) output = rpc.call(context, '%s.%s' % (FLAGS.compute_topic, instance_ref['host']), @@ -535,7 +535,7 @@ class CloudController(object): if volume_ref['attach_status'] == "attached": raise exception.ApiError("Volume is already attached") internal_id = ec2_id_to_internal_id(instance_id) - instance_ref = db.instance_get_by_internal_id(context, internal_id) + instance_ref = self.compute_api.get_instance(context, internal_id) host = instance_ref['host'] rpc.cast(context, db.queue_get_for(context, FLAGS.compute_topic, host), @@ -613,11 +613,7 @@ class CloudController(object): instances = db.instance_get_all_by_reservation(context, reservation_id) else: - if context.user.is_admin(): - instances = db.instance_get_all(context) - else: - instances = db.instance_get_all_by_project(context, - context.project_id) + instances = self.compute_api.get_instances(context) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: @@ -714,7 +710,7 @@ class CloudController(object): def associate_address(self, context, instance_id, public_ip, **kwargs): internal_id = ec2_id_to_internal_id(instance_id) - instance_ref = db.instance_get_by_internal_id(context, internal_id) + instance_ref = self.compute_api.get_instance(context, internal_id) fixed_address = db.instance_get_fixed_address(context, instance_ref['id']) floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) @@ -750,13 +746,12 @@ class CloudController(object): max_count = int(kwargs.get('max_count', 1)) instances = self.compute_api.create_instances(context, instance_types.get_by_type(kwargs.get('instance_type', None)), - self.image_service, kwargs['image_id'], 
min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), ramdisk_id=kwargs.get('ramdisk_id'), - name=kwargs.get('display_name'), + display_name=kwargs.get('display_name'), description=kwargs.get('display_description'), user_data=kwargs.get('user_data', ''), key_name=kwargs.get('key_name'), @@ -801,7 +796,7 @@ class CloudController(object): changes[field] = kwargs[field] if changes: internal_id = ec2_id_to_internal_id(ec2_id) - inst = db.instance_get_by_internal_id(context, internal_id) + inst = self.compute_api.get_instance(context, internal_id) db.instance_update(context, inst['id'], kwargs) return True diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index a9da148677a6..b644876b0c94 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,23 +15,17 @@ # License for the specific language governing permissions and limitations # under the License. -import webob from webob import exc from nova import context from nova import exception -from nova import flags -from nova import rpc -from nova import utils from nova import wsgi from nova.api.openstack import faults +from nova.auth import manager as auth_manager from nova.compute import api as compute_api from nova.compute import instance_types from nova.compute import power_state import nova.api.openstack -import nova.image.service - -FLAGS = flags.FLAGS def _entity_list(entities): @@ -79,10 +73,7 @@ class Controller(wsgi.Controller): "server": ["id", "imageId", "name", "flavorId", "hostId", "status", "progress"]}}} - def __init__(self, db_driver=None): - if not db_driver: - db_driver = FLAGS.db_driver - self.db_driver = utils.import_object(db_driver) + def __init__(self): self.compute_api = compute_api.ComputeAPI() super(Controller, self).__init__() @@ -101,7 +92,7 @@ class Controller(wsgi.Controller): """ user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) - instance_list = self.db_driver.instance_get_all_by_user(ctxt, user_id) + instance_list = self.compute_api.get_instances(ctxt) limited_list = nova.api.openstack.limited(instance_list, req) res = [entity_maker(inst)['server'] for inst in limited_list] return _entity_list(res) @@ -110,7 +101,7 @@ class Controller(wsgi.Controller): """ Returns server details by server id """ user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) - inst = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) + inst = self.compute_api.get_instance(ctxt, int(id)) if inst: if inst.user_id == user_id: return _entity_detail(inst) @@ -134,12 +125,11 @@ class Controller(wsgi.Controller): user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) - key_pair = self.db_driver.key_pair_get_all_by_user(None, user_id)[0] + key_pair = auth_manager.AuthManager.get_key_pairs(ctxt)[0] instances = self.compute_api.create_instances(ctxt, instance_types.get_by_flavor_id(env['server']['flavorId']), - utils.import_object(FLAGS.image_service), env['server']['imageId'], - name=env['server']['name'], + display_name=env['server']['name'], description=env['server']['name'], key_name=key_pair['name'], key_data=key_pair['public_key']) @@ -149,27 +139,24 @@ class Controller(wsgi.Controller): """ Updates the server name or password """ user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) - inst_dict = self._deserialize(req.body, req) - if not inst_dict: 
return faults.Fault(exc.HTTPUnprocessableEntity()) - instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) - if not instance or instance.user_id != user_id: - return faults.Fault(exc.HTTPNotFound()) - update_dict = {} if 'adminPass' in inst_dict['server']: update_dict['admin_pass'] = inst_dict['server']['adminPass'] if 'name' in inst_dict['server']: update_dict['display_name'] = inst_dict['server']['name'] - self.compute_api.update_instance(ctxt, instance['id'], update_dict) + try: + self.compute_api.update_instance(ctxt, instance['id'], update_dict) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) return exc.HTTPNoContent() def action(self, req, id): - """ multi-purpose method used to reboot, rebuild, and + """ Multi-purpose method used to reboot, rebuild, and resize a server """ user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) @@ -177,10 +164,11 @@ class Controller(wsgi.Controller): try: reboot_type = input_dict['reboot']['type'] except Exception: - raise faults.Fault(webob.exc.HTTPNotImplemented()) - inst_ref = self.db.instance_get_by_internal_id(ctxt, int(id)) - if not inst_ref or (inst_ref and not inst_ref.user_id == user_id): + raise faults.Fault(exc.HTTPNotImplemented()) + try: + # TODO(gundlach): pass reboot_type, support soft reboot in + # virt driver + self.compute_api.reboot(ctxt, id) + except: return faults.Fault(exc.HTTPUnprocessableEntity()) - # TODO(gundlach): pass reboot_type, support soft reboot in - # virt driver - self.compute_api.reboot(ctxt, id) + return exc.HTTPNoContent() diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 7b2b6816167c..11c3bd6dfb79 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -624,6 +624,10 @@ class AuthManager(object): with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) + @staticmethod + def get_key_pairs(context): + return db.key_pair_get_all_by_user(context.elevated(), context.user_id) + def get_credentials(self, user, project=None): """Get credential zip for user in project""" if not isinstance(user, User): diff --git a/nova/compute/api.py b/nova/compute/api.py index 457d6e27fcff..995bed91bcde 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -44,16 +44,19 @@ def generate_default_hostname(internal_id): class ComputeAPI(base.Base): """API for interacting with the compute manager.""" - def __init__(self, network_manager=None, **kwargs): + def __init__(self, network_manager=None, image_service=None, **kwargs): if not network_manager: network_manager = utils.import_object(FLAGS.network_manager) self.network_manager = network_manager + if not image_service: + image_service = utils.import_object(FLAGS.image_service) + self.image_service = image_service super(ComputeAPI, self).__init__(**kwargs) - def create_instances(self, context, instance_type, image_service, image_id, - min_count=1, max_count=1, kernel_id=None, - ramdisk_id=None, name='', description='', - user_data='', key_name=None, key_data=None, + def create_instances(self, context, instance_type, image_id, min_count=1, + max_count=1, kernel_id=None, ramdisk_id=None, + display_name='', description='', user_data='', + key_name=None, key_data=None, security_group='default', generate_hostname=generate_default_hostname): """Create the number of instances requested if quote and @@ -70,15 +73,15 @@ class ComputeAPI(base.Base): is_vpn = image_id == FLAGS.vpn_image_id if not is_vpn: - image = image_service.show(context, image_id) + image = 
self.image_service.show(context, image_id) if kernel_id is None: kernel_id = image.get('kernelId', FLAGS.default_kernel) if ramdisk_id is None: ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) # Make sure we have access to kernel and ramdisk - image_service.show(context, kernel_id) - image_service.show(context, ramdisk_id) + self.image_service.show(context, kernel_id) + self.image_service.show(context, ramdisk_id) if security_group is None: security_group = ['default'] @@ -111,7 +114,7 @@ class ComputeAPI(base.Base): 'memory_mb': type_data['memory_mb'], 'vcpus': type_data['vcpus'], 'local_gb': type_data['local_gb'], - 'display_name': name, + 'display_name': display_name, 'display_description': description, 'key_name': key_name, 'key_data': key_data} @@ -123,14 +126,25 @@ class ComputeAPI(base.Base): instance = dict(mac_address=utils.generate_mac(), launch_index=num, **base_options) - instance_ref = self.create_instance(context, security_groups, - **instance) - instance_id = instance_ref['id'] - internal_id = instance_ref['internal_id'] - hostname = generate_hostname(internal_id) - self.update_instance(context, instance_id, hostname=hostname) - instances.append(dict(id=instance_id, internal_id=internal_id, - hostname=hostname, **instance)) + instance = self.db.instance_create(context, instance) + instance_id = instance['id'] + internal_id = instance['internal_id'] + + elevated = context.elevated() + if not security_groups: + security_groups = [] + for security_group_id in security_groups: + self.db.instance_add_security_group(elevated, + instance_id, + security_group_id) + + # Set sane defaults if not specified + updates = dict(hostname=generate_hostname(internal_id)) + if 'display_name' not in instance: + updates['display_name'] = "Server %s" % internal_id + + instance = self.update_instance(context, instance_id, **updates) + instances.append(instance) # TODO(vish): This probably should be done in the scheduler # or in compute as a call. The network should be @@ -165,39 +179,6 @@ class ComputeAPI(base.Base): 'project_id': context.project_id} group = db.security_group_create(context, values) - def create_instance(self, context, security_groups=None, **kwargs): - """Creates the instance in the datastore and returns the - new instance as a mapping - - :param context: The security context - :param security_groups: list of security group ids to - attach to the instance - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - created - - :retval Returns a mapping of the instance information - that has just been created - - """ - instance_ref = self.db.instance_create(context, kwargs) - inst_id = instance_ref['id'] - # Set sane defaults if not specified - if kwargs.get('display_name') is None: - display_name = "Server %s" % instance_ref['internal_id'] - instance_ref['display_name'] = display_name - self.db.instance_update(context, inst_id, - {'display_name': display_name}) - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - inst_id, - security_group_id) - return instance_ref - def update_instance(self, context, instance_id, **kwargs): """Updates the instance in the datastore. 
@@ -210,7 +191,7 @@ class ComputeAPI(base.Base): :retval None """ - self.db.instance_update(context, instance_id, kwargs) + return self.db.instance_update(context, instance_id, kwargs) def delete_instance(self, context, instance_id): logging.debug("Going to try and terminate %d" % instance_id) @@ -264,6 +245,19 @@ class ComputeAPI(base.Base): else: self.db.instance_destroy(context, instance['id']) + def get_instances(self, context, project_id=None): + if project_id or not context.is_admin: + if not context.project: + return self.db.instance_get_all_by_user(context, + context.user_id) + if project_id is None: + project_id = context.project_id + return self.db.instance_get_all_by_project(context, project_id) + return self.db.instance_get_all(context) + + def get_instance(self, context, instance_id): + return self.db.instance_get_by_internal_id(context, instance_id) + def reboot(self, context, instance_id): """Reboot the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index dd9649054534..ef58f3490dd4 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -732,6 +732,7 @@ def instance_update(context, instance_id, values): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) instance_ref.save(session=session) + return instance_ref def instance_add_security_group(context, instance_id, security_group_id): diff --git a/nova/flags.py b/nova/flags.py index 1f94feb08b7c..c6578023d98b 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -259,7 +259,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') # The service to use for image search and retrieval -DEFINE_string('image_service', 'nova.image.local.LocalImageService', +DEFINE_string('image_service', 'nova.image.s3.S3ImageService', 'The service to use for retrieving and searching for images.') DEFINE_string('host', socket.gethostname(), diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 7c0343942b22..c3f129a32d65 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -67,8 +67,7 @@ def fake_wsgi(self, req): def stub_out_key_pair_funcs(stubs): def key_pair(context, user_id): return [dict(name='key', public_key='public_key')] - stubs.Set(nova.db.api, 'key_pair_get_all_by_user', - key_pair) + stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair) def stub_out_image_service(stubs): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 46b9c53487cd..8444b6fce628 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -48,7 +48,7 @@ def return_security_group(context, instance_id, security_group_id): def instance_update(context, instance_id, kwargs): - pass + return stub_instance(instance_id) def instance_address(context, instance_id): @@ -106,11 +106,11 @@ class ServersTest(unittest.TestCase): i += 1 def test_create_instance(self): - def server_update(context, id, params): - pass - def instance_create(context, inst): - return {'id': 1, 'internal_id': 1} + return {'id': 1, 'internal_id': 1, 'display_name': ''} + + def server_update(context, id, params): + return instance_create(context, id) def fake_method(*args, **kwargs): pass diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index a55449739ec4..6f3ef96cbbe4 100644 --- 
a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -72,33 +72,27 @@ class ComputeTestCase(test.TrialTestCase): """Verify that an instance cannot be created without a display_name.""" cases = [dict(), dict(display_name=None)] for instance in cases: - ref = self.compute_api.create_instance(self.context, None, - **instance) + ref = self.compute_api.create_instances(self.context, + FLAGS.default_instance_type, None, **instance) try: - self.assertNotEqual(ref.display_name, None) + self.assertNotEqual(ref[0].display_name, None) finally: - db.instance_destroy(self.context, ref['id']) + db.instance_destroy(self.context, ref[0]['id']) def test_create_instance_associates_security_groups(self): - """Make sure create_instance associates security groups""" - inst = {} - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id + """Make sure create_instances associates security groups""" values = {'name': 'default', 'description': 'default', 'user_id': self.user.id, 'project_id': self.project.id} group = db.security_group_create(self.context, values) - ref = self.compute_api.create_instance(self.context, - security_groups=[group['id']], - **inst) - # reload to get groups - instance_ref = db.instance_get(self.context, ref['id']) + ref = self.compute_api.create_instances(self.context, + FLAGS.default_instance_type, None, security_group=['default']) try: - self.assertEqual(len(instance_ref['security_groups']), 1) + self.assertEqual(len(ref[0]['security_groups']), 1) finally: db.security_group_destroy(self.context, group['id']) - db.instance_destroy(self.context, instance_ref['id']) + db.instance_destroy(self.context, ref[0]['id']) @defer.inlineCallbacks def test_run_terminate(self): From 4f2a8c5398d4d4848f441e366e8bcc5e97a0b34f Mon Sep 17 00:00:00 2001 From: Ryan Lucio Date: Fri, 3 Dec 2010 13:50:30 -0800 Subject: [PATCH 64/83] Decreased the maximum value for instance-id generation from uint32 to int32 to avoid truncation when being entered into the instance table. Reverted fix to make internal_id column a uint --- nova/db/sqlalchemy/api.py | 2 +- nova/db/sqlalchemy/models.py | 3 +-- nova/image/local.py | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index dd9649054534..2dc140274294 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -543,7 +543,7 @@ def instance_create(context, values): with session.begin(): while instance_ref.internal_id == None: # Instances have integer internal ids. 
- internal_id = random.randint(0, 2 ** 32 - 1) + internal_id = random.randint(0, 2 ** 31 - 1) if not instance_internal_id_exists(context, internal_id, session=session): instance_ref.internal_id = internal_id diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 18ba80caf6cc..fe0a9a92162d 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -27,7 +27,6 @@ from sqlalchemy import ForeignKey, DateTime, Boolean, Text from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.schema import ForeignKeyConstraint -from sqlalchemy.databases import mysql from nova.db.sqlalchemy.session import get_session @@ -156,7 +155,7 @@ class Instance(BASE, NovaBase): """Represents a guest vm.""" __tablename__ = 'instances' id = Column(Integer, primary_key=True) - internal_id = Column(mysql.MSInteger(unsigned=True), unique=True) + internal_id = Column(Integer, unique=True) admin_pass = Column(String(255)) diff --git a/nova/image/local.py b/nova/image/local.py index 9b0cdcc50dd5..b445932218af 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -59,7 +59,7 @@ class LocalImageService(service.BaseImageService): """ Store the image data and return the new image id. """ - id = random.randint(0, 2 ** 32 - 1) + id = random.randint(0, 2 ** 31 - 1) data['id'] = id self.update(context, id, data) return id From 1637de18a86712c52d89441c154a8e9aae6fb503 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 12:42:34 +0000 Subject: [PATCH 65/83] pylint and pep8 fixes --- nova/virt/xenapi/network_utils.py | 13 ++++--- nova/virt/xenapi/novadeps.py | 59 ++++++++++++++++++++++--------- nova/virt/xenapi/vm_utils.py | 30 ++++++++++------ nova/virt/xenapi/vmops.py | 18 +++++++--- nova/virt/xenapi/volumeops.py | 2 ++ nova/virt/xenapi_conn.py | 15 +++++++- 6 files changed, 101 insertions(+), 36 deletions(-) diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index b58b9159c201..8cb4cce3a7d3 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -15,20 +15,25 @@ # under the License. """ -Helper methods for operations related to the management of network records and -their attributes like bridges, PIFs, QoS, as well as their lookup functions. +Helper methods for operations related to the management of network +records and their attributes like bridges, PIFs, QoS, as well as +their lookup functions. """ from twisted.internet import defer class NetworkHelper(): - def __init__(self, session): + """ + The class that wraps the helper methods together. + """ + def __init__(self): return @classmethod @defer.inlineCallbacks - def find_network_with_bridge(self, session, bridge): + def find_network_with_bridge(cls, session, bridge): + """ Return the network on which the bridge is attached, if found """ expr = 'field "bridge" = "%s"' % bridge networks = yield session.call_xenapi('network.get_all_records_where', expr) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index 985998486d47..a68fd8e77493 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -14,10 +14,14 @@ # License for the specific language governing permissions and limitations # under the License. +""" +It captures all the inner details of Nova classes and avoid their exposure +to the implementation of the XenAPI module. 
One benefit of this, is to avoid +sprawl of code changes +""" + from nova import db from nova import flags -from nova import process -from nova import utils from nova import context from nova.compute import power_state @@ -53,91 +57,114 @@ flags.DEFINE_float('xenapi_task_poll_interval', class Configuration(object): + """ Wraps Configuration details into common class """ def __init__(self): self._flags = flags.FLAGS @property def xenapi_connection_url(self): + """ Return the connection url """ return self._flags.xenapi_connection_url @property def xenapi_connection_username(self): + """ Return the username used for the connection """ return self._flags.xenapi_connection_username @property def xenapi_connection_password(self): + """ Return the password used for the connection """ return self._flags.xenapi_connection_password @property def xenapi_task_poll_interval(self): + """ Return the poll interval for the connection """ return self._flags.xenapi_task_poll_interval class Instance(object): + """ Wraps up instance specifics """ @classmethod - def get_name(self, instance): + def get_name(cls, instance): + """ The name of the instance """ return instance.name @classmethod - def get_type(self, instance): + def get_type(cls, instance): + """ The type of the instance """ return instance_types.INSTANCE_TYPES[instance.instance_type] @classmethod - def get_project(self, instance): + def get_project(cls, instance): + """ The project the instance belongs """ return AuthManager().get_project(instance.project_id) @classmethod - def get_project_id(self, instance): + def get_project_id(cls, instance): + """ The id of the project the instance belongs """ return instance.project_id @classmethod - def get_image_id(self, instance): + def get_image_id(cls, instance): + """ The instance's image id """ return instance.image_id @classmethod - def get_kernel_id(self, instance): + def get_kernel_id(cls, instance): + """ The instance's kernel id """ return instance.kernel_id @classmethod - def get_ramdisk_id(self, instance): + def get_ramdisk_id(cls, instance): + """ The instance's ramdisk id """ return instance.ramdisk_id @classmethod - def get_network(self, instance): + def get_network(cls, instance): + """ The network the instance is connected to """ # TODO: is ge_admin_context the right context to retrieve? 
return db.project_get_network(context.get_admin_context(), instance.project_id) @classmethod - def get_mac(self, instance): + def get_mac(cls, instance): + """ The instance's MAC address """ return instance.mac_address @classmethod - def get_user(self, instance): + def get_user(cls, instance): + """ The owner of the instance """ return AuthManager().get_user(instance.user_id) class Network(object): + """ Wraps up network specifics """ @classmethod - def get_bridge(self, network): + def get_bridge(cls, network): + """ the bridge for the network """ return network.bridge class Image(object): + """ Wraps up image specifics """ @classmethod - def get_url(self, image): + def get_url(cls, image): + """ the url to get the image from """ return images.image_url(image) class User(object): + """ Wraps up user specifics """ @classmethod - def get_access(self, user, project): + def get_access(cls, user, project): + """ access key """ return AuthManager().get_access_key(user, project) @classmethod - def get_secret(self, user): + def get_secret(cls, user): + """ access secret """ return user.secret diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b68df2791909..002f00c03f2b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -31,12 +31,15 @@ from novadeps import User class VMHelper(): - def __init__(self, session): + """ + The class that wraps the helper methods together. + """ + def __init__(self): return @classmethod @defer.inlineCallbacks - def create_vm(self, session, instance, kernel, ramdisk): + def create_vm(cls, session, instance, kernel, ramdisk): """Create a VM record. Returns a Deferred that gives the new VM reference.""" @@ -80,7 +83,7 @@ class VMHelper(): @classmethod @defer.inlineCallbacks - def create_vbd(self, session, vm_ref, vdi_ref, userdevice, bootable): + def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new VBD reference.""" @@ -105,7 +108,7 @@ class VMHelper(): @classmethod @defer.inlineCallbacks - def create_vif(self, session, vm_ref, network_ref, mac_address): + def create_vif(cls, session, vm_ref, network_ref, mac_address): """Create a VIF record. Returns a Deferred that gives the new VIF reference.""" @@ -127,7 +130,7 @@ class VMHelper(): @classmethod @defer.inlineCallbacks - def fetch_image(self, session, image, user, project, use_sr): + def fetch_image(cls, session, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for its kernel and ramdisk (if external kernels are being used). 
@@ -135,7 +138,7 @@ class VMHelper(): url = Image.get_url(image) access = User.get_access(user, project) - logging.debug("Asking xapi to fetch %s as %s" % (url, access)) + logging.debug("Asking xapi to fetch %s as %s", url, access) fn = use_sr and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url @@ -149,11 +152,13 @@ class VMHelper(): @classmethod @utils.deferredToThread - def lookup(self, session, i): + def lookup(cls, session, i): + """ Look the instance i up, and returns it if available """ return VMHelper.lookup_blocking(session, i) @classmethod - def lookup_blocking(self, session, i): + def lookup_blocking(cls, session, i): + """ Synchronous lookup """ vms = session.get_xenapi().VM.get_by_name_label(i) n = len(vms) if n == 0: @@ -165,11 +170,13 @@ class VMHelper(): @classmethod @utils.deferredToThread - def lookup_vm_vdis(self, session, vm): + def lookup_vm_vdis(cls, session, vm): + """ Look for the VDIs that are attached to the VM """ return VMHelper.lookup_vm_vdis_blocking(session, vm) @classmethod - def lookup_vm_vdis_blocking(self, session, vm): + def lookup_vm_vdis_blocking(cls, session, vm): + """ Synchronous lookup_vm_vdis """ # Firstly we get the VBDs, then the VDIs. # TODO: do we leave the read-only devices? vbds = session.get_xenapi().VM.get_VBDs(vm) @@ -180,7 +187,8 @@ class VMHelper(): vdi = session.get_xenapi().VBD.get_VDI(vbd) # Test valid VDI record = session.get_xenapi().VDI.get_record(vdi) - except Exception, exc: + logging.debug('VDI %s is still available', record['uuid']) + except XenAPI.Failure, exc: logging.warn(exc) else: vdis.append(vdi) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d6ea5e7db707..7ea8be999b71 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -31,15 +31,20 @@ from network_utils import NetworkHelper class VMOps(object): + """ + Management class for VM-related tasks + """ def __init__(self, session): self._session = session def list_instances(self): + """ List VM instances """ return [self._session.get_xenapi().VM.get_name_label(vm) \ for vm in self._session.get_xenapi().VM.get_all()] @defer.inlineCallbacks def spawn(self, instance): + """ Create VM instance """ vm = yield VMHelper.lookup(self._session, Instance.get_name(instance)) if vm is not None: raise Exception('Attempted to create non-unique name %s' % @@ -71,6 +76,7 @@ class VMOps(object): @defer.inlineCallbacks def reboot(self, instance): + """ Reboot VM instance """ instance_name = Instance.get_name(instance) vm = yield VMHelper.lookup(self._session, instance_name) if vm is None: @@ -80,6 +86,7 @@ class VMOps(object): @defer.inlineCallbacks def destroy(self, instance): + """ Destroy VM instance """ vm = yield VMHelper.lookup(self._session, Instance.get_name(instance)) if vm is None: # Don't complain, just return. 
This lets us clean up instances @@ -91,7 +98,7 @@ class VMOps(object): task = yield self._session.call_xenapi('Async.VM.hard_shutdown', vm) yield self._session.wait_for_task(task) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) # Disk clean-up if vdis: @@ -100,15 +107,16 @@ class VMOps(object): task = yield self._session.call_xenapi('Async.VDI.destroy', vdi) yield self._session.wait_for_task(task) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) try: task = yield self._session.call_xenapi('Async.VM.destroy', vm) yield self._session.wait_for_task(task) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) def get_info(self, instance_id): + """ Return data about VM instance """ vm = VMHelper.lookup_blocking(self._session, instance_id) if vm is None: raise Exception('instance not present %s' % instance_id) @@ -120,4 +128,6 @@ class VMOps(object): 'cpu_time': 0} def get_console_output(self, instance): - return 'FAKE CONSOLE OUTPUT' + """ Return snapshot of console """ + # TODO: implement this to fix pylint! + return 'FAKE CONSOLE OUTPUT of instance' diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 23f79adf7133..a4c7a38619ac 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -24,7 +24,9 @@ class VolumeOps(object): self._session = session def attach_volume(self, instance_name, device_path, mountpoint): + # FIXME: that's going to be sorted when iscsi-xenapi lands in branch return True def detach_volume(self, instance_name, mountpoint): + # FIXME: that's going to be sorted when iscsi-xenapi lands in branch return True diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 948fade7e9d2..e5e67128a356 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -83,47 +83,59 @@ def get_connection(_): class XenAPIConnection(object): + """ A connection to XenServer or Xen Cloud Platform """ def __init__(self, url, user, pw): session = XenAPISession(url, user, pw) self._vmops = VMOps(session) self._volumeops = VolumeOps(session) def list_instances(self): + """ List VM instances """ return self._vmops.list_instances() def spawn(self, instance): + """ Create VM instance """ self._vmops.spawn(instance) def reboot(self, instance): + """ Reboot VM instance """ self._vmops.reboot(instance) def destroy(self, instance): + """ Destroy VM instance """ self._vmops.destroy(instance) def get_info(self, instance_id): + """ Return data about VM instance """ return self._vmops.get_info(instance_id) def get_console_output(self, instance): + """ Return snapshot of console """ return self._vmops.get_console_output(instance) def attach_volume(self, instance_name, device_path, mountpoint): + """ Attach volume storage to VM instance """ return self._volumeops.attach_volume(instance_name, device_path, mountpoint) def detach_volume(self, instance_name, mountpoint): + """ Detach volume storage to VM instance """ return self._volumeops.detach_volume(instance_name, mountpoint) class XenAPISession(object): + """ The session to invoke XenAPI SDK calls """ def __init__(self, url, user, pw): self._session = XenAPI.Session(url) self._session.login_with_password(user, pw) def get_xenapi(self): + """ Return the xenapi object """ return self._session.xenapi def get_xenapi_host(self): + """ Return the xenapi host """ return self._session.xenapi.session.get_this_host(self._session.handle) @utils.deferredToThread @@ -170,12 +182,13 @@ class XenAPISession(object): error_info) 
deferred.errback(XenAPI.Failure(error_info)) #logging.debug('Polling task %s done.', task) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) deferred.errback(exc) def _unwrap_plugin_exceptions(func, *args, **kwargs): + """ Parse exception details """ try: return func(*args, **kwargs) except XenAPI.Failure, exc: From f25a25d2693d603eb9a6f87d9629d53542219736 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 15:53:35 +0000 Subject: [PATCH 66/83] moved XenAPI namespace definition into xenapi/__init__.py --- nova/virt/xenapi/__init__.py | 11 +++++++++++ nova/virt/xenapi/vm_utils.py | 1 + nova/virt/xenapi/vmops.py | 1 + nova/virt/xenapi_conn.py | 7 +------ 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index 3d598c463cc0..ece430407178 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -13,3 +13,14 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + +""" +This is loaded late so that there's no need to install this library +when not using XenAPI +""" + +XenAPI = None +global XenAPI + +if XenAPI is None: + XenAPI = __import__('XenAPI') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 002f00c03f2b..52ab2901d455 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -20,6 +20,7 @@ their attributes like VDIs, VIFs, as well as their lookup functions. """ import logging +import XenAPI from twisted.internet import defer diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7ea8be999b71..3db86f179128 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -19,6 +19,7 @@ Management class for VM-related functions (spawn, reboot, etc). """ import logging +import XenAPI from twisted.internet import defer diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index e5e67128a356..2839a753c523 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -58,19 +58,14 @@ from nova import utils from xenapi.vmops import VMOps from xenapi.volumeops import VolumeOps from xenapi.novadeps import Configuration +from xenapi import XenAPI -XenAPI = None Config = Configuration() def get_connection(_): """Note that XenAPI doesn't have a read-only connection mode, so the read_only parameter is ignored.""" - # This is loaded late so that there's no need to install this - # library when not using XenAPI. 
- global XenAPI - if XenAPI is None: - XenAPI = __import__('XenAPI') url = Config.xenapi_connection_url username = Config.xenapi_connection_username password = Config.xenapi_connection_password From c2e328a158cadf45df9fb07f0c3da91f11ad416e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 19:46:42 +0000 Subject: [PATCH 67/83] fixed import module in __init__.py --- nova/virt/xenapi/__init__.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index ece430407178..ed8c293a3418 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -19,8 +19,5 @@ This is loaded late so that there's no need to install this library when not using XenAPI """ -XenAPI = None -global XenAPI - -if XenAPI is None: - XenAPI = __import__('XenAPI') +XenAPI = __import__('XenAPI') +global XenAPI \ No newline at end of file From 76fd35b62bf565fe626ca30c412178894d8e579c Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 6 Dec 2010 15:14:41 -0500 Subject: [PATCH 68/83] Don't wrap HTTPAccepted in a fault. Correctly pass kwargs to update_instance. --- nova/api/openstack/servers.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index a9da148677a6..e7ab17d0383e 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -124,7 +124,7 @@ class Controller(wsgi.Controller): self.compute_api.delete_instance(ctxt, int(id)) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return faults.Fault(exc.HTTPAccepted()) + return exc.HTTPAccepted() def create(self, req): """ Creates a new server for a given user """ @@ -165,7 +165,7 @@ class Controller(wsgi.Controller): if 'name' in inst_dict['server']: update_dict['display_name'] = inst_dict['server']['name'] - self.compute_api.update_instance(ctxt, instance['id'], update_dict) + self.compute_api.update_instance(ctxt, instance['id'], **update_dict) return exc.HTTPNoContent() def action(self, req, id): @@ -184,3 +184,4 @@ class Controller(wsgi.Controller): # TODO(gundlach): pass reboot_type, support soft reboot in # virt driver self.compute_api.reboot(ctxt, id) + return exc.HTTPAccepted() From 88c0e3e380d50d5794970063bbe464171089f260 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Tue, 7 Dec 2010 04:41:53 +0000 Subject: [PATCH 69/83] modified a few files --- nova/api/ec2/cloud.py | 1 - nova/compute/api.py | 14 ++++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index e50906ae1799..a05dc0f1c463 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -760,7 +760,6 @@ class CloudController(object): ramdisk_id=kwargs.get('ramdisk_id'), name=kwargs.get('display_name'), description=kwargs.get('display_description'), - user_data=kwargs.get('user_data', ''), key_name=kwargs.get('key_name'), security_group=kwargs.get('security_group'), generate_hostname=internal_id_to_ec2_id) diff --git a/nova/compute/api.py b/nova/compute/api.py index 929342a1e979..6830bacb842d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -52,7 +52,7 @@ class ComputeAPI(base.Base): def create_instances(self, context, instance_type, image_service, image_id, network_topic, min_count=1, max_count=1, kernel_id=None, ramdisk_id=None, name='', - description='', user_data='', key_name=None, + description='', key_name=None, key_data=None, security_group='default', 
generate_hostname=generate_default_hostname): """Create the number of instances requested if quote and @@ -143,8 +143,8 @@ class ComputeAPI(base.Base): {"method": "setup_fixed_ip", "args": {"address": address}}) - logging.debug("Casting to scheduler for %s/%s's instance %s" % - (context.project_id, context.user_id, instance_id)) + logging.debug("Casting to scheduler for %s/%s's instance %s", + context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, {"method": "run_instance", @@ -154,6 +154,12 @@ class ComputeAPI(base.Base): return instances def ensure_default_security_group(self, context): + """ Create security group for the security context if it + does not already exist + + :param context: the security context + + """ try: db.security_group_get_by_name(context, context.project_id, 'default') @@ -162,7 +168,7 @@ class ComputeAPI(base.Base): 'description': 'default', 'user_id': context.user_id, 'project_id': context.project_id} - group = db.security_group_create(context, values) + db.security_group_create(context, values) def create_instance(self, context, security_groups=None, **kwargs): """Creates the instance in the datastore and returns the From 09ebc4c33ff52c352cdab54fea41d1b116a446f4 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 11:31:43 +0000 Subject: [PATCH 70/83] addressed review comments, complied with HACKING guidelines --- nova/virt/xenapi/__init__.py | 8 -- nova/virt/xenapi/novadeps.py | 170 ----------------------------------- nova/virt/xenapi/vm_utils.py | 36 +++++--- nova/virt/xenapi/vmops.py | 42 ++++----- nova/virt/xenapi_conn.py | 36 +++++--- 5 files changed, 70 insertions(+), 222 deletions(-) delete mode 100644 nova/virt/xenapi/novadeps.py diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index ed8c293a3418..3d598c463cc0 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -13,11 +13,3 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -""" -This is loaded late so that there's no need to install this library -when not using XenAPI -""" - -XenAPI = __import__('XenAPI') -global XenAPI \ No newline at end of file diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py deleted file mode 100644 index a68fd8e77493..000000000000 --- a/nova/virt/xenapi/novadeps.py +++ /dev/null @@ -1,170 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -It captures all the inner details of Nova classes and avoid their exposure -to the implementation of the XenAPI module. 
One benefit of this, is to avoid -sprawl of code changes -""" - -from nova import db -from nova import flags -from nova import context - -from nova.compute import power_state -from nova.auth.manager import AuthManager -from nova.compute import instance_types -from nova.virt import images - -XENAPI_POWER_STATE = { - 'Halted': power_state.SHUTDOWN, - 'Running': power_state.RUNNING, - 'Paused': power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed': power_state.CRASHED} - - -flags.DEFINE_string('xenapi_connection_url', - None, - 'URL for connection to XenServer/Xen Cloud Platform.' - ' Required if connection_type=xenapi.') -flags.DEFINE_string('xenapi_connection_username', - 'root', - 'Username for connection to XenServer/Xen Cloud Platform.' - ' Used only if connection_type=xenapi.') -flags.DEFINE_string('xenapi_connection_password', - None, - 'Password for connection to XenServer/Xen Cloud Platform.' - ' Used only if connection_type=xenapi.') -flags.DEFINE_float('xenapi_task_poll_interval', - 0.5, - 'The interval used for polling of remote tasks ' - '(Async.VM.start, etc). Used only if ' - 'connection_type=xenapi.') - - -class Configuration(object): - """ Wraps Configuration details into common class """ - def __init__(self): - self._flags = flags.FLAGS - - @property - def xenapi_connection_url(self): - """ Return the connection url """ - return self._flags.xenapi_connection_url - - @property - def xenapi_connection_username(self): - """ Return the username used for the connection """ - return self._flags.xenapi_connection_username - - @property - def xenapi_connection_password(self): - """ Return the password used for the connection """ - return self._flags.xenapi_connection_password - - @property - def xenapi_task_poll_interval(self): - """ Return the poll interval for the connection """ - return self._flags.xenapi_task_poll_interval - - -class Instance(object): - """ Wraps up instance specifics """ - - @classmethod - def get_name(cls, instance): - """ The name of the instance """ - return instance.name - - @classmethod - def get_type(cls, instance): - """ The type of the instance """ - return instance_types.INSTANCE_TYPES[instance.instance_type] - - @classmethod - def get_project(cls, instance): - """ The project the instance belongs """ - return AuthManager().get_project(instance.project_id) - - @classmethod - def get_project_id(cls, instance): - """ The id of the project the instance belongs """ - return instance.project_id - - @classmethod - def get_image_id(cls, instance): - """ The instance's image id """ - return instance.image_id - - @classmethod - def get_kernel_id(cls, instance): - """ The instance's kernel id """ - return instance.kernel_id - - @classmethod - def get_ramdisk_id(cls, instance): - """ The instance's ramdisk id """ - return instance.ramdisk_id - - @classmethod - def get_network(cls, instance): - """ The network the instance is connected to """ - # TODO: is ge_admin_context the right context to retrieve? 
- return db.project_get_network(context.get_admin_context(), - instance.project_id) - - @classmethod - def get_mac(cls, instance): - """ The instance's MAC address """ - return instance.mac_address - - @classmethod - def get_user(cls, instance): - """ The owner of the instance """ - return AuthManager().get_user(instance.user_id) - - -class Network(object): - """ Wraps up network specifics """ - - @classmethod - def get_bridge(cls, network): - """ the bridge for the network """ - return network.bridge - - -class Image(object): - """ Wraps up image specifics """ - - @classmethod - def get_url(cls, image): - """ the url to get the image from """ - return images.image_url(image) - - -class User(object): - """ Wraps up user specifics """ - - @classmethod - def get_access(cls, user, project): - """ access key """ - return AuthManager().get_access_key(user, project) - - @classmethod - def get_secret(cls, user): - """ access secret """ - return user.secret diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 52ab2901d455..407acda6e95b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -25,10 +25,17 @@ import XenAPI from twisted.internet import defer from nova import utils +from nova.auth.manager import AuthManager +from nova.compute import instance_types +from nova.virt import images +from nova.compute import power_state -from novadeps import Instance -from novadeps import Image -from novadeps import User +XENAPI_POWER_STATE = { + 'Halted': power_state.SHUTDOWN, + 'Running': power_state.RUNNING, + 'Paused': power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed': power_state.CRASHED} class VMHelper(): @@ -44,7 +51,7 @@ class VMHelper(): """Create a VM record. Returns a Deferred that gives the new VM reference.""" - instance_type = Instance.get_type(instance) + instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) rec = { @@ -76,10 +83,9 @@ class VMHelper(): 'user_version': '0', 'other_config': {}, } - logging.debug('Created VM %s...', Instance.get_name(instance)) + logging.debug('Created VM %s...', instance.name) vm_ref = yield session.call_xenapi('VM.create', rec) - logging.debug('Created VM %s as %s.', - Instance.get_name(instance), vm_ref) + logging.debug('Created VM %s as %s.', instance.name, vm_ref) defer.returnValue(vm_ref) @classmethod @@ -137,14 +143,14 @@ class VMHelper(): its kernel and ramdisk (if external kernels are being used). Returns a Deferred that gives the new VDI UUID.""" - url = Image.get_url(image) - access = User.get_access(user, project) + url = images.image_url(image) + access = AuthManager().get_access_key(user, project) logging.debug("Asking xapi to fetch %s as %s", url, access) fn = use_sr and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url args['username'] = access - args['password'] = User.get_secret(user) + args['password'] = user.secret if use_sr: args['add_partition'] = 'true' task = yield session.async_call_plugin('objectstore', fn, args) @@ -179,7 +185,7 @@ class VMHelper(): def lookup_vm_vdis_blocking(cls, session, vm): """ Synchronous lookup_vm_vdis """ # Firstly we get the VBDs, then the VDIs. - # TODO: do we leave the read-only devices? + # TODO(Armando): do we leave the read-only devices? 
vbds = session.get_xenapi().VM.get_VBDs(vm) vdis = [] if vbds: @@ -197,3 +203,11 @@ class VMHelper(): return vdis else: return None + + @classmethod + def compile_info(cls, record): + return {'state': XENAPI_POWER_STATE[record['power_state']], + 'max_mem': long(record['memory_static_max']) >> 10, + 'mem': long(record['memory_dynamic_max']) >> 10, + 'num_cpu': record['VCPUs_max'], + 'cpu_time': 0} diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3db86f179128..3696782b3198 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -23,12 +23,11 @@ import XenAPI from twisted.internet import defer -from novadeps import XENAPI_POWER_STATE -from novadeps import Instance -from novadeps import Network - -from vm_utils import VMHelper -from network_utils import NetworkHelper +from nova import db +from nova import context +from nova.auth.manager import AuthManager +from nova.virt.xenapi.network_utils import NetworkHelper +from nova.virt.xenapi.vm_utils import VMHelper class VMOps(object): @@ -46,39 +45,40 @@ class VMOps(object): @defer.inlineCallbacks def spawn(self, instance): """ Create VM instance """ - vm = yield VMHelper.lookup(self._session, Instance.get_name(instance)) + vm = yield VMHelper.lookup(self._session, instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % - Instance.get_name(instance)) + instance.name) - bridge = Network.get_bridge(Instance.get_network(instance)) + bridge = db.project_get_network(context.get_admin_context(), + instance.project_id).bridge network_ref = \ yield NetworkHelper.find_network_with_bridge(self._session, bridge) - user = Instance.get_user(instance) - project = Instance.get_project(instance) + user = AuthManager().get_user(instance.user_id) + project = AuthManager().get_project(instance.project_id) vdi_uuid = yield VMHelper.fetch_image(self._session, - Instance.get_image_id(instance), user, project, True) + instance.image_id, user, project, True) kernel = yield VMHelper.fetch_image(self._session, - Instance.get_kernel_id(instance), user, project, False) + instance.kernel_id, user, project, False) ramdisk = yield VMHelper.fetch_image(self._session, - Instance.get_ramdisk_id(instance), user, project, False) + instance.ramdisk_id, user, project, False) vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) vm_ref = yield VMHelper.create_vm(self._session, instance, kernel, ramdisk) yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) if network_ref: yield VMHelper.create_vif(self._session, vm_ref, - network_ref, Instance.get_mac(instance)) + network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) yield self._session.call_xenapi('VM.start', vm_ref, False, False) - logging.info('Spawning VM %s created %s.', Instance.get_name(instance), + logging.info('Spawning VM %s created %s.', instance.name, vm_ref) @defer.inlineCallbacks def reboot(self, instance): """ Reboot VM instance """ - instance_name = Instance.get_name(instance) + instance_name = instance.name vm = yield VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance not present %s' % instance_name) @@ -88,7 +88,7 @@ class VMOps(object): @defer.inlineCallbacks def destroy(self, instance): """ Destroy VM instance """ - vm = yield VMHelper.lookup(self._session, Instance.get_name(instance)) + vm = yield VMHelper.lookup(self._session, instance.name) if vm is None: # Don't complain, just return. 
This lets us clean up instances # that have already disappeared from the underlying platform. @@ -122,11 +122,7 @@ class VMOps(object): if vm is None: raise Exception('instance not present %s' % instance_id) rec = self._session.get_xenapi().VM.get_record(vm) - return {'state': XENAPI_POWER_STATE[rec['power_state']], - 'max_mem': long(rec['memory_static_max']) >> 10, - 'mem': long(rec['memory_dynamic_max']) >> 10, - 'num_cpu': rec['VCPUs_max'], - 'cpu_time': 0} + return VMHelper.compile_info(rec) def get_console_output(self, instance): """ Return snapshot of console """ diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 2839a753c523..a2eac4dc2b71 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -49,26 +49,42 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. import logging import xmlrpclib +import XenAPI from twisted.internet import defer from twisted.internet import reactor from nova import utils +from nova import flags +from nova.virt.xenapi.vmops import VMOps +from nova.virt.xenapi.volumeops import VolumeOps -from xenapi.vmops import VMOps -from xenapi.volumeops import VolumeOps -from xenapi.novadeps import Configuration -from xenapi import XenAPI - -Config = Configuration() +FLAGS = flags.FLAGS +flags.DEFINE_string('xenapi_connection_url', + None, + 'URL for connection to XenServer/Xen Cloud Platform.' + ' Required if connection_type=xenapi.') +flags.DEFINE_string('xenapi_connection_username', + 'root', + 'Username for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') +flags.DEFINE_string('xenapi_connection_password', + None, + 'Password for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') +flags.DEFINE_float('xenapi_task_poll_interval', + 0.5, + 'The interval used for polling of remote tasks ' + '(Async.VM.start, etc). Used only if ' + 'connection_type=xenapi.') def get_connection(_): """Note that XenAPI doesn't have a read-only connection mode, so the read_only parameter is ignored.""" - url = Config.xenapi_connection_url - username = Config.xenapi_connection_username - password = Config.xenapi_connection_password + url = FLAGS.xenapi_connection_url + username = FLAGS.xenapi_connection_username + password = FLAGS.xenapi_connection_password if not url or password is None: raise Exception('Must specify xenapi_connection_url, ' 'xenapi_connection_username (optionally), and ' @@ -165,7 +181,7 @@ class XenAPISession(object): #logging.debug('Polling task %s...', task) status = self._session.xenapi.task.get_status(task) if status == 'pending': - reactor.callLater(Config.xenapi_task_poll_interval, + reactor.callLater(FLAGS.xenapi_task_poll_interval, self._poll_task, task, deferred) elif status == 'success': result = self._session.xenapi.task.get_result(task) From c1a40a8381ae3e559b3faad4a93ffec1abe8907f Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 7 Dec 2010 10:06:49 -0800 Subject: [PATCH 71/83] Added docstring for get_instances. --- nova/compute/api.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/compute/api.py b/nova/compute/api.py index 995bed91bcde..cb23dae555f4 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -246,6 +246,9 @@ class ComputeAPI(base.Base): self.db.instance_destroy(context, instance['id']) def get_instances(self, context, project_id=None): + """Get all instances, possibly filtered by project ID or + user ID. 
If there is no filter and the context is an admin, + it will retreive all instances in the system.""" if project_id or not context.is_admin: if not context.project: return self.db.instance_get_all_by_user(context, From d7ca22cce7df319efc57a2e8224016817c92bbdb Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 18:57:44 +0000 Subject: [PATCH 72/83] importing XenAPI module loaded late --- nova/virt/xenapi/vm_utils.py | 9 ++++++--- nova/virt/xenapi/vmops.py | 6 +++++- nova/virt/xenapi_conn.py | 8 +++++++- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 407acda6e95b..99d484ca2be8 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -20,15 +20,14 @@ their attributes like VDIs, VIFs, as well as their lookup functions. """ import logging -import XenAPI from twisted.internet import defer from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types -from nova.virt import images from nova.compute import power_state +from nova.virt import images XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -37,13 +36,17 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} +XenAPI = None + class VMHelper(): """ The class that wraps the helper methods together. """ def __init__(self): - return + global XenAPI + if XenAPI is None: + XenAPI = __import__('XenAPI') @classmethod @defer.inlineCallbacks diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3696782b3198..d36cdaea5b2f 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -19,7 +19,6 @@ Management class for VM-related functions (spawn, reboot, etc). """ import logging -import XenAPI from twisted.internet import defer @@ -29,12 +28,17 @@ from nova.auth.manager import AuthManager from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper +XenAPI = None + class VMOps(object): """ Management class for VM-related tasks """ def __init__(self, session): + global XenAPI + if XenAPI is None: + XenAPI = __import__('XenAPI') self._session = session def list_instances(self): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index a2eac4dc2b71..26b30bf92720 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -49,7 +49,6 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. import logging import xmlrpclib -import XenAPI from twisted.internet import defer from twisted.internet import reactor @@ -78,10 +77,17 @@ flags.DEFINE_float('xenapi_task_poll_interval', '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') +XenAPI = None + def get_connection(_): """Note that XenAPI doesn't have a read-only connection mode, so the read_only parameter is ignored.""" + # This is loaded late so that there's no need to install this + # library when not using XenAPI. 
From 994f2820676b47b4f2e919d5ae7d2f9eb66c4372 Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Tue, 7 Dec 2010 20:25:24 +0100
Subject: [PATCH 73/83] Add Ryan Lucio to Authors

---
 Authors | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Authors b/Authors
index ef1a535ca9b9..62f0c49d5d60 100644
--- a/Authors
+++ b/Authors
@@ -20,6 +20,7 @@ Michael Gundlach
 Monty Taylor
 Paul Voccio
 Rick Clark
+Ryan Lucio
 Soren Hansen
 Todd Willey
 Vishvananda Ishaya

From 06c5889936cec1be503595915a0e0df2c4f925a8 Mon Sep 17 00:00:00 2001
From: Ryan Lane
Date: Tue, 7 Dec 2010 19:35:05 +0000
Subject: [PATCH 74/83] Adding myself to the authors list

---
 Authors | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Authors b/Authors
index ef1a535ca9b9..a35398d5d3a1 100644
--- a/Authors
+++ b/Authors
@@ -25,3 +25,4 @@ Todd Willey
 Vishvananda Ishaya
 Youcef Laribi
 Zhixue Wu
+Ryan Lane

From bf34529e75022451f3833552df0e807139d0e498 Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Tue, 7 Dec 2010 21:35:15 +0100
Subject: [PATCH 75/83] Make sure Authors check also works for pending merges
 (otherwise stuff can get merged that will make the next merge fail this
 check).

---
 nova/tests/misc_unittest.py | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/nova/tests/misc_unittest.py b/nova/tests/misc_unittest.py
index 856060afa26c..667c63ad09e3 100644
--- a/nova/tests/misc_unittest.py
+++ b/nova/tests/misc_unittest.py
@@ -15,7 +15,6 @@
 # under the License.
 
 import os
-import subprocess
 
 from nova import test
 from nova.utils import parse_mailmap, str_dict_replace
@@ -24,18 +23,23 @@ from nova.utils import parse_mailmap, str_dict_replace
 
 class ProjectTestCase(test.TrialTestCase):
     def test_authors_up_to_date(self):
         if os.path.exists('../.bzr'):
-            log_cmd = subprocess.Popen(["bzr", "log", "-n0"],
-                                       stdout=subprocess.PIPE)
-            changelog = log_cmd.communicate()[0]
+            contributors = set()
+
             mailmap = parse_mailmap('../.mailmap')
 
-            contributors = set()
-            for l in changelog.split('\n'):
-                l = l.strip()
-                if (l.startswith('author:') or l.startswith('committer:')
-                    and not l == 'committer: Tarmac'):
-                    email = l.split(' ')[-1]
-                    contributors.add(str_dict_replace(email, mailmap))
+            import bzrlib.workingtree
+            tree = bzrlib.workingtree.WorkingTree.open('..')
+            tree.lock_read()
+            parents = tree.get_parent_ids()
+            g = tree.branch.repository.get_graph()
+            for p in parents[1:]:
+                rev_ids = [r for r, _ in g.iter_ancestry(parents)
+                           if r != "null:"]
+                revs = tree.branch.repository.get_revisions(rev_ids)
+                for r in revs:
+                    for author in r.get_apparent_authors():
+                        email = author.split(' ')[-1]
+                        contributors.add(str_dict_replace(email, mailmap))
 
             authors_file = open('../Authors', 'r').read()
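The rewritten test walks the pending merge parents with bzrlib instead of scraping `bzr log` output, but the check itself is unchanged: every contributor e-mail, normalised through the .mailmap alias table, must appear in Authors. A simplified, self-contained version of that check (illustrative only; the helper name is hypothetical):

    def missing_authors(contributor_emails, mailmap, authors_text):
        """Return contributor e-mails that are not yet listed in Authors.

        mailmap maps alias e-mails to canonical ones, e.g.
        {'<alias@example.com>': '<canonical@example.com>'}.
        """
        missing = set()
        for email in contributor_emails:
            canonical = mailmap.get(email, email)
            if canonical not in authors_text:
                missing.add(canonical)
        return missing
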
From d03620f31aac6e8720bb6dc19860cb609af878c6 Mon Sep 17 00:00:00 2001
From: Ryan Lane
Date: Tue, 7 Dec 2010 21:13:54 +0000
Subject: [PATCH 76/83] Reverting last change

---
 Authors | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Authors b/Authors
index a35398d5d3a1..ef1a535ca9b9 100644
--- a/Authors
+++ b/Authors
@@ -25,4 +25,3 @@ Todd Willey
 Vishvananda Ishaya
 Youcef Laribi
 Zhixue Wu
-Ryan Lane

From 17fd38e3cb277d51dcf9297178879a620623a855 Mon Sep 17 00:00:00 2001
From: Ryan Lane
Date: Tue, 7 Dec 2010 23:46:18 +0000
Subject: [PATCH 77/83] Removing redundant check

---
 nova/auth/ldapdriver.py | 49 ++++++++++++++++++++++---------------------------
 1 file changed, 22 insertions(+), 27 deletions(-)

diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index fa48c8435ff3..d54a0dfa653c 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -135,34 +135,29 @@ class LdapDriver(object):
             if self.__ldap_user_exists(name):
                 # Retrieve user by name
                 user = self.__get_ldap_user(name)
-                if user.has_key('accessKey') and user.has_key('secretKey') \
-                and user.has_key('isAdmin'):
-                    raise exception.Duplicate("LDAP user %s already exists" \
-                                              % name)
+                # Entry could be malformed, test for missing attrs.
+                # Malformed entries are useless, replace attributes found.
+                attr = []
+                if user.has_key('secretKey'):
+                    attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
+                        [secret_key]))
                 else:
-                    # Entry could be malformed, test for missing attrs.
-                    # Malformed entries are useless, replace attributes found.
-                    attr = []
-                    if user.has_key('secretKey'):
-                        attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
-                            [secret_key]))
-                    else:
-                        attr.append((self.ldap.MOD_ADD, 'secretKey', \
-                            [secret_key]))
-                    if user.has_key('accessKey'):
-                        attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
-                            [access_key]))
-                    else:
-                        attr.append((self.ldap.MOD_ADD, 'accessKey', \
-                            [access_key]))
-                    if user.has_key('isAdmin'):
-                        attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
-                            [str(is_admin).upper()]))
-                    else:
-                        attr.append((self.ldap.MOD_ADD, 'isAdmin', \
-                            [str(is_admin).upper()]))
-                    self.conn.modify_s(self.__uid_to_dn(name), attr)
-                    return self.get_user(name)
+                    attr.append((self.ldap.MOD_ADD, 'secretKey', \
+                        [secret_key]))
+                if user.has_key('accessKey'):
+                    attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
+                        [access_key]))
+                else:
+                    attr.append((self.ldap.MOD_ADD, 'accessKey', \
+                        [access_key]))
+                if user.has_key('isAdmin'):
+                    attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
+                        [str(is_admin).upper()]))
+                else:
+                    attr.append((self.ldap.MOD_ADD, 'isAdmin', \
+                        [str(is_admin).upper()]))
+                self.conn.modify_s(self.__uid_to_dn(name), attr)
+                return self.get_user(name)
         else:
             attr = [
                 ('objectclass', ['person',

From 45324fc9f15135437051eaaedda68a5ef1f0da7a Mon Sep 17 00:00:00 2001
From: Ryan Lane
Date: Tue, 7 Dec 2010 23:53:01 +0000
Subject: [PATCH 78/83] Raising an exception if the user doesn't exist before
 trying to modify its attributes

---
 nova/auth/ldapdriver.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index d54a0dfa653c..5727c8da3147 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -158,6 +158,8 @@ class LdapDriver(object):
                         [str(is_admin).upper()]))
                 self.conn.modify_s(self.__uid_to_dn(name), attr)
                 return self.get_user(name)
+            else:
+                raise exception.NotFound("User %s doesn't exist" % name)
         else:
             attr = [
                 ('objectclass', ['person',

From abdb8080e365a584c64ce6562934eefb750568ba Mon Sep 17 00:00:00 2001
From: Ryan Lane
Date: Wed, 8 Dec 2010 00:08:47 +0000
Subject: [PATCH 79/83] Clarifying previously committed exception message

---
 nova/auth/ldapdriver.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 5727c8da3147..45ea0683d288 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -159,7 +159,7 @@ class LdapDriver(object):
                 self.conn.modify_s(self.__uid_to_dn(name), attr)
                 return self.get_user(name)
             else:
-                raise exception.NotFound("User %s doesn't exist" % name)
+                raise exception.NotFound("LDAP object for %s doesn't exist" % name)
         else:
             attr = [
                 ('objectclass', ['person',
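Patches 77-79 make LdapDriver repair an existing-but-malformed LDAP entry (replacing or adding the key attributes as needed) and raise NotFound when no entry exists at all. The MOD_REPLACE/MOD_ADD decision boils down to the following (a hypothetical sketch using plain python-ldap, not the driver itself):

    import ldap


    def build_user_modlist(existing_attrs, access_key, secret_key, is_admin):
        """Build a modlist suitable for conn.modify_s(dn, modlist)."""
        wanted = {'accessKey': [access_key],
                  'secretKey': [secret_key],
                  'isAdmin': [str(is_admin).upper()]}
        modlist = []
        for name, values in wanted.items():
            # Replace the attribute if the entry already has it, add it otherwise.
            op = ldap.MOD_REPLACE if name in existing_attrs else ldap.MOD_ADD
            modlist.append((op, name, values))
        return modlist
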
From 70371ab447bff6af36f12ad9594eb6ffdbff4396 Mon Sep 17 00:00:00 2001
From: Ryan Lane
Date: Wed, 8 Dec 2010 00:26:41 +0000
Subject: [PATCH 80/83] pep8 fix

---
 nova/auth/ldapdriver.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 45ea0683d288..9baf45c924fa 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -159,7 +159,8 @@ class LdapDriver(object):
                 self.conn.modify_s(self.__uid_to_dn(name), attr)
                 return self.get_user(name)
             else:
-                raise exception.NotFound("LDAP object for %s doesn't exist" % name)
+                raise exception.NotFound("LDAP object for %s doesn't exist"
+                                         % name)
         else:
             attr = [
                 ('objectclass', ['person',

From 9fdff2a0f0b45d7ddf1df58f83ac723fc8d99532 Mon Sep 17 00:00:00 2001
From: Ryan Lane
Date: Wed, 8 Dec 2010 00:34:20 +0000
Subject: [PATCH 81/83] More pep8 fixes to remove deprecated functions

---
 nova/auth/ldapdriver.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 9baf45c924fa..c10939d74332 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -138,19 +138,19 @@ class LdapDriver(object):
                 # Entry could be malformed, test for missing attrs.
                 # Malformed entries are useless, replace attributes found.
                 attr = []
-                if user.has_key('secretKey'):
+                if 'secretKey' in user.keys():
                     attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
                         [secret_key]))
                 else:
                     attr.append((self.ldap.MOD_ADD, 'secretKey', \
                         [secret_key]))
-                if user.has_key('accessKey'):
+                if 'accessKey' in user.keys():
                     attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
                         [access_key]))
                 else:
                     attr.append((self.ldap.MOD_ADD, 'accessKey', \
                         [access_key]))
-                if user.has_key('isAdmin'):
+                if 'isAdmin' in user.keys():
                     attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
                         [str(is_admin).upper()]))
                 else:
@@ -298,13 +298,13 @@ class LdapDriver(object):
         attr = []
         # Retrieve user by name
         user = self.__get_ldap_user(uid)
-        if user.has_key('secretKey'):
+        if 'secretKey' in user.keys():
             attr.append((self.ldap.MOD_DELETE, 'secretKey', \
                 user['secretKey']))
-        if user.has_key('accessKey'):
+        if 'accessKey' in user.keys():
             attr.append((self.ldap.MOD_DELETE, 'accessKey', \
                 user['accessKey']))
-        if user.has_key('isAdmin'):
+        if 'isAdmin' in user.keys():
             attr.append((self.ldap.MOD_DELETE, 'isAdmin', \
                 user['isAdmin']))
         self.conn.modify_s(self.__uid_to_dn(uid), attr)
@@ -513,8 +513,8 @@ class LdapDriver(object):
         """Convert ldap attributes to User object"""
         if attr is None:
             return None
-        if (attr.has_key('accessKey') and attr.has_key('secretKey') \
-            and attr.has_key('isAdmin')):
+        if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \
+            and 'isAdmin' in attr.keys()):
             return {
                 'id': attr['uid'][0],
                 'name': attr['cn'][0],
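Patch 81 replaces the deprecated dict.has_key() calls with membership tests. For reference, `key in d` is equivalent to the `key in d.keys()` spelling used above and avoids building the intermediate key list on Python 2 (small illustration, not part of the patch):

    user = {'secretKey': ['abc123']}

    assert 'secretKey' in user          # preferred membership test
    assert 'secretKey' in user.keys()   # equivalent; builds a list on Python 2
    assert 'accessKey' not in user      # has_key() is gone in Python 3
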
From 708425aa5b42aae0f399b127ee5a648b7162b05e Mon Sep 17 00:00:00 2001
From: Andy Smith
Date: Wed, 8 Dec 2010 12:20:44 -0800
Subject: [PATCH 82/83] add bzr to the dev dependencies

---
 tools/pip-requires | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tools/pip-requires b/tools/pip-requires
index 548073326560..17a1a4c5c183 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -20,3 +20,4 @@ mox==0.5.0
 -f http://pymox.googlecode.com/files/mox-0.5.0.tar.gz
 greenlet==0.3.1
 nose
+bzr

From 0ae019062712fd15dd9e040a3fa60546db9c4111 Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 8 Dec 2010 23:47:25 +0000
Subject: [PATCH 83/83] added to Authors

---
 Authors | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Authors b/Authors
index 62f0c49d5d60..a1703b279db1 100644
--- a/Authors
+++ b/Authors
@@ -23,6 +23,7 @@ Rick Clark
 Ryan Lucio
 Soren Hansen
 Todd Willey
+Trey Morris
 Vishvananda Ishaya
 Youcef Laribi
 Zhixue Wu