merge trunk, fix conflict

Anthony Young
2011-01-11 17:32:12 -08:00
11 changed files with 342 additions and 52 deletions


@@ -24,8 +24,10 @@ Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Ken Pepple <ken.pepple@gmail.com>
Lorin Hochstein <lorin@isi.edu>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>

bin/nova-console (new executable file, 44 lines added)

@@ -0,0 +1,44 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Console Proxy."""
import eventlet
eventlet.monkey_patch()

import gettext
import os
import sys

# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                                os.pardir,
                                                os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import service
from nova import utils

if __name__ == '__main__':
    utils.default_flagfile()
    service.serve()
    service.wait()

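bin/nova-console follows the same bootstrap pattern as the existing nova-* scripts: monkey-patch eventlet before anything else is imported, then prepend the source checkout to sys.path so an in-tree nova package takes precedence over any installed copy. A minimal standalone sketch of that path-detection step; the helper name is illustrative and not part of the commit:

import os
import sys

def prefer_source_tree(script_path, package='nova'):
    """If <script dir>/../<package>/__init__.py exists, put that tree first on sys.path."""
    topdir = os.path.normpath(os.path.join(os.path.abspath(script_path),
                                           os.pardir, os.pardir))
    if os.path.exists(os.path.join(topdir, package, '__init__.py')):
        sys.path.insert(0, topdir)
    return topdir

if __name__ == '__main__':
    print(prefer_source_tree(sys.argv[0]))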

@@ -333,6 +333,11 @@ class ProjectCommands(object):
arguments: name project_manager [description]"""
self.manager.create_project(name, project_manager, description)
def modify(self, name, project_manager, description=None):
"""Modifies a project
arguments: name project_manager [description]"""
self.manager.modify_project(name, project_manager, description)
def delete(self, name):
"""Deletes an existing project
arguments: name"""

krm_mapping.json.sample (new file, 3 lines added)

@@ -0,0 +1,3 @@
{
"machine" : ["kernel", "ramdisk"]
}

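The new krm_mapping.json.sample appears to map a machine image name to the kernel and ramdisk image names it should be paired with. A hedged sketch of how a consumer might read such a mapping; the loader below is hypothetical and not part of this commit:

import json

def load_krm_mapping(path='krm_mapping.json'):
    """Return the {machine_image: [kernel_image, ramdisk_image]} mapping from disk."""
    with open(path) as handle:
        return json.load(handle)

# e.g. kernel, ramdisk = load_krm_mapping()['machine']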

@@ -119,8 +119,7 @@ class LdapDriver(object):
def get_project(self, pid):
"""Retrieve project by id"""
dn = 'cn=%s,%s' % (pid,
FLAGS.ldap_project_subtree)
dn = self.__project_to_dn(pid)
attr = self.__find_object(dn, LdapDriver.project_pattern)
return self.__to_project(attr)
@@ -228,7 +227,8 @@ class LdapDriver(object):
('description', [description]),
(LdapDriver.project_attribute, [manager_dn]),
('member', members)]
self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
dn = self.__project_to_dn(name, search=False)
self.conn.add_s(dn, attr)
return self.__to_project(dict(attr))
def modify_project(self, project_id, manager_uid=None, description=None):
@@ -246,23 +246,22 @@ class LdapDriver(object):
manager_dn))
if description:
attr.append((self.ldap.MOD_REPLACE, 'description', description))
self.conn.modify_s('cn=%s,%s' % (project_id,
FLAGS.ldap_project_subtree),
attr)
dn = self.__project_to_dn(project_id)
self.conn.modify_s(dn, attr)
def add_to_project(self, uid, project_id):
"""Add user to project"""
dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
dn = self.__project_to_dn(project_id)
return self.__add_to_group(uid, dn)
def remove_from_project(self, uid, project_id):
"""Remove user from project"""
dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
dn = self.__project_to_dn(project_id)
return self.__remove_from_group(uid, dn)
def is_in_project(self, uid, project_id):
"""Check if user is in project"""
dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
dn = self.__project_to_dn(project_id)
return self.__is_in_group(uid, dn)
def has_role(self, uid, role, project_id=None):
@@ -302,7 +301,7 @@ class LdapDriver(object):
roles.append(role)
return roles
else:
project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
project_dn = self.__project_to_dn(project_id)
query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
(LdapDriver.project_pattern, self.__uid_to_dn(uid)))
roles = self.__find_objects(project_dn, query)
@@ -335,7 +334,7 @@ class LdapDriver(object):
def delete_project(self, project_id):
"""Delete a project"""
project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
project_dn = self.__project_to_dn(project_id)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
@@ -367,9 +366,10 @@ class LdapDriver(object):
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
attr = self.__find_object(self.__uid_to_dn(uid),
'(objectclass=novaUser)')
return attr
dn = FLAGS.ldap_user_subtree
query = ('(&(%s=%s)(objectclass=novaUser))' %
(FLAGS.ldap_user_id_attribute, uid))
return self.__find_object(dn, query)
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
@@ -420,15 +420,13 @@ class LdapDriver(object):
query = '(objectclass=groupOfNames)'
return self.__find_object(dn, query) is not None
@staticmethod
def __role_to_dn(role, project_id=None):
def __role_to_dn(self, role, project_id=None):
"""Convert role to corresponding dn"""
if project_id is None:
return FLAGS.__getitem__("ldap_%s" % role).value
else:
return 'cn=%s,cn=%s,%s' % (role,
project_id,
FLAGS.ldap_project_subtree)
project_dn = self.__project_to_dn(project_id)
return 'cn=%s,%s' % (role, project_dn)
def __create_group(self, group_dn, name, uid,
description, member_uids=None):
@@ -534,6 +532,42 @@ class LdapDriver(object):
for role_dn in self.__find_role_dns(project_dn):
self.__delete_group(role_dn)
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
if attr is None:
return None
member_dns = attr.get('member', [])
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
'project_manager_id':
self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
'description': attr.get('description', [None])[0],
'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
def __uid_to_dn(self, uid, search=True):
"""Convert uid to dn"""
# By default return a generated DN
userdn = (FLAGS.ldap_user_id_attribute + '=%s,%s'
% (uid, FLAGS.ldap_user_subtree))
if search:
query = ('%s=%s' % (FLAGS.ldap_user_id_attribute, uid))
user = self.__find_dns(FLAGS.ldap_user_subtree, query)
if len(user) > 0:
userdn = user[0]
return userdn
def __project_to_dn(self, pid, search=True):
"""Convert pid to dn"""
# By default return a generated DN
projectdn = ('cn=%s,%s' % (pid, FLAGS.ldap_project_subtree))
if search:
query = ('(&(cn=%s)%s)' % (pid, LdapDriver.project_pattern))
project = self.__find_dns(FLAGS.ldap_project_subtree, query)
if len(project) > 0:
projectdn = project[0]
return projectdn
@staticmethod
def __to_user(attr):
"""Convert ldap attributes to User object"""
@@ -550,30 +584,11 @@ class LdapDriver(object):
else:
return None
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
if attr is None:
return None
member_dns = attr.get('member', [])
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
'project_manager_id':
self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
'description': attr.get('description', [None])[0],
'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
@staticmethod
def __dn_to_uid(dn):
"""Convert user dn to uid"""
return dn.split(',')[0].split('=')[1]
@staticmethod
def __uid_to_dn(uid):
"""Convert uid to dn"""
return (FLAGS.ldap_user_id_attribute + '=%s,%s'
% (uid, FLAGS.ldap_user_subtree))
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""

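The recurring change in the LDAP driver is that hard-coded 'cn=%s,%s' DNs are replaced by __project_to_dn()/__uid_to_dn(), which search the directory first and only fall back to a generated DN under the default subtree. A simplified, self-contained sketch of that search-then-fallback idea; the query is reduced here, whereas the driver's real version also filters on project_pattern or the user id attribute:

def resolve_dn(find_dns, subtree, name):
    """Prefer a DN found by searching; otherwise generate cn=<name>,<subtree>."""
    matches = find_dns(subtree, '(cn=%s)' % name)
    return matches[0] if matches else 'cn=%s,%s' % (name, subtree)

# find_dns is any callable returning a list of matching DNs, e.g. a thin
# wrapper around an LDAP search in the spirit of LdapDriver.__find_dns.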

@@ -684,8 +684,7 @@ class AuthManager(object):
else:
regions = {'nova': FLAGS.cc_host}
for region, host in regions.iteritems():
rc = self.__generate_rc(user.access,
user.secret,
rc = self.__generate_rc(user,
pid,
use_dmz,
host)
@@ -725,7 +724,7 @@ class AuthManager(object):
return self.__generate_rc(user.access, user.secret, pid, use_dmz)
@staticmethod
def __generate_rc(access, secret, pid, use_dmz=True, host=None):
def __generate_rc(user, pid, use_dmz=True, host=None):
"""Generate rc file for user"""
if use_dmz:
cc_host = FLAGS.cc_dmz
@@ -738,14 +737,19 @@ class AuthManager(object):
s3_host = host
cc_host = host
rc = open(FLAGS.credentials_template).read()
rc = rc % {'access': access,
rc = rc % {'access': user.access,
'project': pid,
'secret': secret,
'secret': user.secret,
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
cc_host,
FLAGS.cc_port,
FLAGS.ec2_suffix),
's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port),
'os': '%s://%s:%s%s' % (FLAGS.os_prefix,
cc_host,
FLAGS.cc_port,
FLAGS.os_suffix),
'user': user.name,
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file}

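__generate_rc() now receives the whole user object rather than separate access/secret values, so the template context can also carry the user name and the new OpenStack API endpoint. The rc file itself is plain %-formatting against a dict of named keys, which is why the novarc template below gains %(user)s and %(os)s placeholders. A small sketch of that substitution with made-up values:

template = ('export EC2_ACCESS_KEY="%(access)s"\n'
            'export CLOUD_SERVERS_USERNAME="%(user)s"\n'
            'export CLOUD_SERVERS_URL="%(os)s"\n')

rc = template % {'access': 'fake-access-key',
                 'user': 'fake-user',
                 'os': 'http://127.0.0.1:8773/v1.0/'}
print(rc)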

@@ -10,3 +10,7 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s
export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
export CLOUD_SERVERS_API_KEY="%(access)s"
export CLOUD_SERVERS_USERNAME="%(user)s"
export CLOUD_SERVERS_URL="%(os)s"


@@ -200,10 +200,22 @@ def DECLARE(name, module_string, flag_values=FLAGS):
"%s not defined by %s" % (name, module_string))
def _get_my_ip():
"""Returns the actual ip of the local machine."""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.gaierror as ex:
return "127.0.0.1"
# __GLOBAL FLAGS ONLY__
# Define any app-specific flags in their own files, docs at:
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#a9
DEFINE_string('my_ip', _get_my_ip(), 'host ip address')
DEFINE_list('region_list',
[],
'list of region=url pairs separated by commas')
@@ -211,11 +223,13 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('glance_port', 9292, 'glance port')
DEFINE_string('glance_host', '127.0.0.1', 'glance host')
DEFINE_string('glance_host', '$my_ip', 'glance host')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', '127.0.0.1', 's3 dmz ip (for instances)')
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('console_topic', 'console',
'the topic console proxy nodes listen on')
DEFINE_string('scheduler_topic', 'scheduler',
'the topic scheduler nodes listen on')
DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
@@ -241,10 +255,12 @@ DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('ec2_prefix', 'http', 'prefix for ec2')
DEFINE_string('cc_host', '127.0.0.1', 'ip of api server')
DEFINE_string('cc_dmz', '127.0.0.1', 'internal ip of api server')
DEFINE_string('os_prefix', 'http', 'prefix for openstack')
DEFINE_string('cc_host', '$my_ip', 'ip of api server')
DEFINE_string('cc_dmz', '$my_ip', 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
DEFINE_string('os_suffix', '/v1.0/', 'suffix for openstack')
DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
@@ -276,6 +292,8 @@ DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
'Manager for compute')
DEFINE_string('console_manager', 'nova.console.manager.ConsoleProxyManager',
'Manager for console proxy')
DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
'Manager for network')
DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',

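The host-style flags move from a hard-coded 127.0.0.1 default to '$my_ip', with the machine's address discovered by _get_my_ip()'s connected-UDP-socket trick (no packet is actually sent). The '$my_ip' references are presumably interpolated against the my_ip flag when flags are parsed; a hedged, string.Template-style sketch of that expansion, not nova's actual parser:

import string

# Hypothetical flag values after _get_my_ip() has supplied my_ip.
values = {'my_ip': '10.0.0.5', 'cc_host': '$my_ip', 's3_host': '$my_ip'}
resolved = dict((name, string.Template(val).safe_substitute(values))
                for name, val in values.items())
assert resolved['cc_host'] == '10.0.0.5'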
nova/tests/test_console.py (new file, 129 lines added)

@@ -0,0 +1,129 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Console proxy.
"""
import datetime
import logging
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.console import manager as console_manager
FLAGS = flags.FLAGS
class ConsoleTestCase(test.TestCase):
"""Test case for console proxy"""
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
stub_compute=True)
self.console = utils.import_object(FLAGS.console_manager)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.get_admin_context()
self.host = 'test_compute_host'
def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
super(ConsoleTestCase, self).tearDown()
def _create_instance(self):
"""Create a test instance"""
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
inst['image_id'] = 'ami-test'
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']
def test_get_pool_for_instance_host(self):
pool = self.console.get_pool_for_instance_host(self.context, self.host)
self.assertEqual(pool['compute_host'], self.host)
def test_get_pool_creates_new_pool_if_needed(self):
self.assertRaises(exception.NotFound,
db.console_pool_get_by_host_type,
self.context,
self.host,
self.console.host,
self.console.driver.console_type)
pool = self.console.get_pool_for_instance_host(self.context,
self.host)
pool2 = db.console_pool_get_by_host_type(self.context,
self.host,
self.console.host,
self.console.driver.console_type)
self.assertEqual(pool['id'], pool2['id'])
def test_get_pool_does_not_create_new_pool_if_exists(self):
pool_info = {'address': '127.0.0.1',
'username': 'test',
'password': '1234pass',
'host': self.console.host,
'console_type': self.console.driver.console_type,
'compute_host': 'sometesthostname'}
new_pool = db.console_pool_create(self.context, pool_info)
pool = self.console.get_pool_for_instance_host(self.context,
'sometesthostname')
self.assertEqual(pool['id'], new_pool['id'])
def test_add_console(self):
instance_id = self._create_instance()
self.console.add_console(self.context, instance_id)
instance = db.instance_get(self.context, instance_id)
pool = db.console_pool_get_by_host_type(self.context,
instance['host'],
self.console.host,
self.console.driver.console_type)
console_instances = [con['instance_id'] for con in pool.consoles]
self.assert_(instance_id in console_instances)
def test_add_console_does_not_duplicate(self):
instance_id = self._create_instance()
cons1 = self.console.add_console(self.context, instance_id)
cons2 = self.console.add_console(self.context, instance_id)
self.assertEqual(cons1, cons2)
def test_remove_console(self):
instance_id = self._create_instance()
console_id = self.console.add_console(self.context, instance_id)
self.console.remove_console(self.context, console_id)
self.assertRaises(exception.NotFound,
db.console_get,
self.context,
console_id)

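The new test_console.py pins down get-or-create semantics for console pools (one pool per compute host, proxy host and console type) and idempotent add_console. A toy sketch of the get-or-create shape those tests exercise, over a plain dict rather than the nova db layer:

def get_pool_for_instance_host(pools, compute_host, proxy_host, console_type):
    """Return the existing pool for this (compute, proxy, type) triple, creating it once."""
    key = (compute_host, proxy_host, console_type)
    if key not in pools:
        pools[key] = {'id': len(pools) + 1, 'compute_host': compute_host}
    return pools[key]

pools = {}
first = get_pool_for_instance_host(pools, 'compute1', 'proxy1', 'fake')
again = get_pool_for_instance_host(pools, 'compute1', 'proxy1', 'fake')
assert first['id'] == again['id']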

@@ -249,7 +249,7 @@ class IptablesFirewallTestCase(test.TestCase):
'-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010'
'# Completed on Mon Dec 6 11:54:13 2010',
]
def test_static_filters(self):
@@ -276,6 +276,20 @@ class IptablesFirewallTestCase(test.TestCase):
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
@@ -297,7 +311,35 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertTrue(rule in out_rules,
'Rule went missing: %s' % rule)
print '\n'.join(out_rules)
instance_chain = None
for rule in out_rules:
# This is pretty crude, but it'll do for now
if '-d 10.11.12.13 -j' in rule:
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
security_group_chain in out_rules,
"ICMP acceptance rule wasn't added")
self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type'
' 8 -j ACCEPT' % security_group_chain in out_rules,
"ICMP Echo Request acceptance rule wasn't added")
self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
'--dports 80:81 -j ACCEPT' % security_group_chain \
in out_rules,
"TCP port 80/81 acceptance rule wasn't added")
class NWFilterTestCase(test.TestCase):

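The added assertions scan the generated iptables rules twice: first for the rule matching the instance address ('-d 10.11.12.13 -j ...') to learn the per-instance chain, then for the rule that jumps from that chain to the per-security-group chain, before checking the ICMP and TCP ACCEPT rules. A standalone sketch of that admittedly crude extraction; the rule strings and chain names below are invented for illustration:

def find_jump_target(rules, needle):
    """Return the final token (the jump target chain) of the first rule containing needle."""
    for rule in rules:
        if needle in rule:
            return rule.split(' ')[-1]
    return None

rules = ['-A nova-FORWARD -d 10.11.12.13 -j inst-42',
         '-A inst-42 -j sg-testgroup',
         '-A sg-testgroup -p icmp -s 192.168.11.0/24 -j ACCEPT']
instance_chain = find_jump_target(rules, '-d 10.11.12.13 -j')
security_group_chain = find_jump_target(rules, '-A %s -j' % instance_chain)
assert security_group_chain == 'sg-testgroup'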

@@ -41,9 +41,33 @@ def stubout_instance_snapshot(stubs):
rv = done.wait()
return rv
def fake_loop(self):
pass
stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
fake_wait_for_task)
stubs.Set(xenapi_conn.XenAPISession, '_stop_loop', fake_loop)
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record
sr_ref = "fakesr"
vdi_ref = create_vdi(name_label=name_label, read_only=False,
sr_ref=sr_ref, sharable=False)
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
vdi_uuid = vdi_rec['uuid']
return vdi_uuid
stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
def fake_parse_xmlrpc_value(val):
return val
stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record