Merged trunk.

Eric Day
2010-08-07 14:45:36 -07:00
12 changed files with 336 additions and 355 deletions

View File

@@ -35,32 +35,34 @@ sys.path.append(os.path.abspath(os.path.join(__file__, "../../")))
from nova import flags
from nova import rpc
from nova import utils
from nova.compute import linux_net
from nova.compute import network
from nova.network import linux_net
from nova.network import model
from nova.network import service
FLAGS = flags.FLAGS
def add_lease(mac, ip, hostname, interface):
if FLAGS.fake_rabbit:
network.lease_ip(ip)
service.VlanNetworkService().lease_ip(ip)
else:
rpc.cast(FLAGS.cloud_topic, {"method": "lease_ip",
"args" : {"address": ip}})
rpc.cast("%s.%s" (FLAGS.network_topic, FLAGS.node_name),
{"method": "lease_ip",
"args" : {"fixed_ip": ip}})
def old_lease(mac, ip, hostname, interface):
logging.debug("Adopted old lease or got a change of mac/hostname")
def del_lease(mac, ip, hostname, interface):
if FLAGS.fake_rabbit:
network.release_ip(ip)
service.VlanNetworkService().release_ip(ip)
else:
rpc.cast(FLAGS.cloud_topic, {"method": "release_ip",
"args" : {"address": ip}})
rpc.cast("%s.%s" (FLAGS.network_topic, FLAGS.node_name),
{"method": "release_ip",
"args" : {"fixed_ip": ip}})
def init_leases(interface):
net = network.get_network_by_interface(interface)
net = model.get_network_by_interface(interface)
res = ""
for host_name in net.hosts:
res += "%s\n" % linux_net.hostDHCP(net, host_name, net.hosts[host_name])

View File

@@ -29,16 +29,12 @@ from nova import flags
from nova import utils
from nova.auth import manager
from nova.compute import model
from nova.compute import network
from nova.cloudpipe import pipelib
from nova.endpoint import cloud
FLAGS = flags.FLAGS
class NetworkCommands(object):
def restart(self):
network.restart_nets()
class VpnCommands(object):
def __init__(self):
@@ -170,6 +166,13 @@ class ProjectCommands(object):
arguments: name"""
self.manager.delete_project(name)
def environment(self, project_id, user_id, filename='novarc'):
"""exports environment variables to an sourcable file
arguments: project_id user_id [filename='novarc]"""
rc = self.manager.get_environment_rc(project_id, user_id)
with open(filename, 'w') as f:
f.write(rc)
def list(self):
"""lists all projects
arguments: <none>"""
@@ -182,14 +185,11 @@ class ProjectCommands(object):
self.manager.remove_from_project(user, project)
def zip(self, project_id, user_id, filename='nova.zip'):
"""exports credentials for user to a zip file
"""exports credentials for project to a zip file
arguments: project_id user_id [filename='nova.zip']"""
project = self.manager.get_project(project_id)
if project:
with open(filename, 'w') as f:
f.write(project.get_credentials(user_id))
else:
print "Project %s doesn't exist" % project
zip = self.manager.get_credentials(project_id, user_id)
with open(filename, 'w') as f:
f.write(zip)
def usage(script_name):
@@ -197,7 +197,6 @@ def usage(script_name):
categories = [
('network', NetworkCommands),
('user', UserCommands),
('project', ProjectCommands),
('role', RoleCommands),
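The new `environment` subcommand above only writes the generated rc text to a file. A hedged sketch of the same flow; fake_get_environment_rc and the exported variable names are placeholders for AuthManager.get_environment_rc, not real nova code:

def fake_get_environment_rc(project_id, user_id):
    """Stand-in for AuthManager.get_environment_rc, used only in this sketch."""
    return ('export NOVA_PROJECT="%s"\n'
            'export NOVA_USER="%s"\n' % (project_id, user_id))

def environment(project_id, user_id, filename='novarc'):
    """Mirror the nova-manage command: dump a sourcable rc file."""
    rc = fake_get_environment_rc(project_id, user_id)
    with open(filename, 'w') as f:
        f.write(rc)

environment('testproj', 'testuser', 'novarc')
print(open('novarc').read())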

View File

@@ -21,12 +21,16 @@
Twistd daemon for the nova network nodes.
"""
from nova import flags
from nova import twistd
from nova.network import service
FLAGS = flags.FLAGS
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
application = service.NetworkService.create()
application = service.type_to_class(FLAGS.network_type).create()

View File

@@ -28,6 +28,8 @@ import json
from nova import datastore
SCOPE_BASE = 0
SCOPE_ONELEVEL = 1 # not implemented
SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
@@ -188,15 +190,18 @@ class FakeLDAP(object):
Args:
dn -- dn to search under
scope -- only SCOPE_SUBTREE is supported
scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
query -- query to filter objects by
fields -- fields to return. Returns all fields if not specified
"""
if scope != SCOPE_SUBTREE:
if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
raise NotImplementedError(str(scope))
redis = datastore.Redis.instance()
keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
if scope == SCOPE_BASE:
keys = ["%s%s" % (self.__redis_prefix, dn)]
else:
keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
objects = []
for key in keys:
# get the attributes from redis
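The FakeLDAP change above adds SCOPE_BASE support by reading one exact redis key, while SCOPE_SUBTREE keeps the wildcard-style match. A standalone sketch of that key selection (the prefix is an assumed stand-in for __redis_prefix, and a plain dict replaces redis so it runs without a server):

SCOPE_BASE = 0
SCOPE_SUBTREE = 2
PREFIX = "ldap:"   # assumed stand-in for FakeLDAP's redis key prefix

# plain dict instead of redis so the sketch runs without a server
store = {
    "ldap:cn=devs,ou=groups": {"member": ["uid=bob"]},
    "ldap:uid=bob,ou=users": {"cn": ["bob"]},
}

def keys_for_scope(dn, scope):
    """BASE hits exactly one key; SUBTREE keeps the old wildcard-style match."""
    if scope == SCOPE_BASE:
        return ["%s%s" % (PREFIX, dn)]
    if scope == SCOPE_SUBTREE:
        return [k for k in store if k.endswith(dn)]
    raise NotImplementedError(str(scope))

print(keys_for_scope("cn=devs,ou=groups", SCOPE_BASE))    # one exact key
print(keys_for_scope("ou=groups", SCOPE_SUBTREE))         # every dn under it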

View File

@@ -272,26 +272,30 @@ class LdapDriver(object):
"""Check if project exists"""
return self.get_project(name) != None
def __find_object(self, dn, query = None):
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
objects = self.__find_objects(dn, query)
objects = self.__find_objects(dn, query, scope)
if len(objects) == 0:
return None
return objects[0]
def __find_dns(self, dn, query=None):
def __find_dns(self, dn, query=None, scope=None):
"""Find dns by query"""
if scope is None: # one of the flags is 0!!
scope = self.ldap.SCOPE_SUBTREE
try:
res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the DNs
return [dn for dn, attributes in res]
def __find_objects(self, dn, query = None):
def __find_objects(self, dn, query=None, scope=None):
"""Find objects by query"""
if scope is None: # one of the flags is 0!!
scope = self.ldap.SCOPE_SUBTREE
try:
res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the attributes
@@ -361,7 +365,8 @@ class LdapDriver(object):
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
'(member=%s)' % self.__uid_to_dn(uid))
'(member=%s)' % self.__uid_to_dn(uid),
self.ldap.SCOPE_BASE)
return res != None
def __add_to_group(self, uid, group_dn):
@@ -391,7 +396,11 @@ class LdapDriver(object):
if not self.__is_in_group(uid, group_dn):
raise exception.NotFound("User %s is not a member of the group" %
(uid,))
self.__safe_remove_from_group(uid, group_dn)
# NOTE(vish): remove user from group and any sub_groups
sub_dns = self.__find_group_dns_with_member(
group_dn, uid)
for sub_dn in sub_dns:
self.__safe_remove_from_group(uid, sub_dn)
def __safe_remove_from_group(self, uid, group_dn):
"""Remove user from group, deleting group if user is last member"""

View File

@@ -36,6 +36,7 @@ from nova import objectstore # for flags
from nova import utils
from nova.auth import ldapdriver # for flags
from nova.auth import signer
from nova.network import vpn
FLAGS = flags.FLAGS
@@ -50,13 +51,14 @@ flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
'Roles that apply to all projects')
flags.DEFINE_bool('use_vpn', True, 'Support per-project vpns')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
flags.DEFINE_string('vpn_client_template',
utils.abspath('cloudpipe/client.ovpn.template'),
'Template for creating users vpn file')
flags.DEFINE_string('credential_vpn_file', 'nova-vpn.conf',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_key_file', 'pk.pem',
'Filename of private key in credentials zip')
flags.DEFINE_string('credential_cert_file', 'cert.pem',
@@ -64,19 +66,11 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
flags.DEFINE_integer('vpn_start_port', 1000,
'Start port for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_end_port', 2000,
'End port for the cloudpipe VPN servers')
flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
flags.DEFINE_string('vpn_ip', '127.0.0.1',
'Public IP for the cloudpipe VPN servers')
flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
'Driver that auth manager uses')
@@ -228,86 +222,6 @@ class Project(AuthBase):
self.member_ids)
class NoMorePorts(exception.Error):
pass
class Vpn(datastore.BasicModel):
"""Manages vpn ips and ports for projects"""
def __init__(self, project_id):
self.project_id = project_id
super(Vpn, self).__init__()
@property
def identifier(self):
"""Identifier used for key in redis"""
return self.project_id
@classmethod
def create(cls, project_id):
"""Creates a vpn for project
This method finds a free ip and port and stores the associated
values in the datastore.
"""
# TODO(vish): get list of vpn ips from redis
port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
vpn = cls(project_id)
# save ip for project
vpn['project'] = project_id
vpn['ip'] = FLAGS.vpn_ip
vpn['port'] = port
vpn.save()
return vpn
@classmethod
def find_free_port_for_ip(cls, ip):
"""Finds a free port for a given ip from the redis set"""
# TODO(vish): these redis commands should be generalized and
# placed into a base class. Conceptually, it is
# similar to an association, but we are just
# storing a set of values instead of keys that
# should be turned into objects.
redis = datastore.Redis.instance()
key = 'ip:%s:ports' % ip
# TODO(vish): these ports should be allocated through an admin
# command instead of a flag
if (not redis.exists(key) and
not redis.exists(cls._redis_association_name('ip', ip))):
for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
redis.sadd(key, i)
port = redis.spop(key)
if not port:
raise NoMorePorts()
return port
@classmethod
def num_ports_for_ip(cls, ip):
"""Calculates the number of free ports for a given ip"""
return datastore.Redis.instance().scard('ip:%s:ports' % ip)
@property
def ip(self):
"""The ip assigned to the project"""
return self['ip']
@property
def port(self):
"""The port assigned to the project"""
return int(self['port'])
def save(self):
"""Saves the association to the given ip"""
self.associate_with('ip', self.ip)
super(Vpn, self).save()
def destroy(self):
"""Cleans up datastore and adds port back to pool"""
self.unassociate_with('ip', self.ip)
datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
super(Vpn, self).destroy()
class AuthManager(object):
"""Manager Singleton for dealing with Users, Projects, and Keypairs
@@ -585,8 +499,6 @@ class AuthManager(object):
description,
member_users)
if project_dict:
if FLAGS.use_vpn:
Vpn.create(project_dict['id'])
return Project(**project_dict)
def add_to_project(self, user, project):
@@ -623,10 +535,10 @@ class AuthManager(object):
@return: A tuple containing (ip, port) or None, None if vpn has
not been allocated for user.
"""
vpn = Vpn.lookup(Project.safe_id(project))
if not vpn:
return None, None
return (vpn.ip, vpn.port)
network_data = vpn.NetworkData.lookup(Project.safe_id(project))
if not network_data:
raise exception.NotFound('project network data has not been set')
return (network_data.ip, network_data.port)
def delete_project(self, project):
"""Deletes a project"""
@@ -757,25 +669,27 @@ class AuthManager(object):
rc = self.__generate_rc(user.access, user.secret, pid)
private_key, signed_cert = self._generate_x509_cert(user.id, pid)
vpn = Vpn.lookup(pid)
if not vpn:
raise exception.Error("No vpn data allocated for project %s" %
project.name)
configfile = open(FLAGS.vpn_client_template,"r")
s = string.Template(configfile.read())
configfile.close()
config = s.substitute(keyfile=FLAGS.credential_key_file,
certfile=FLAGS.credential_cert_file,
ip=vpn.ip,
port=vpn.port)
tmpdir = tempfile.mkdtemp()
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
zippy.writestr(FLAGS.credential_rc_file, rc)
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
zippy.writestr("nebula-client.conf", config)
network_data = vpn.NetworkData.lookup(pid)
if network_data:
configfile = open(FLAGS.vpn_client_template,"r")
s = string.Template(configfile.read())
configfile.close()
config = s.substitute(keyfile=FLAGS.credential_key_file,
certfile=FLAGS.credential_cert_file,
ip=network_data.ip,
port=network_data.port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
logging.warn("No vpn data for project %s" %
pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
zippy.close()
with open(zf, 'rb') as f:
@@ -784,6 +698,15 @@ class AuthManager(object):
shutil.rmtree(tmpdir)
return buffer
def get_environment_rc(self, user, project=None):
"""Get credential zip for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
project = user.id
pid = Project.safe_id(project)
return self.__generate_rc(user.access, user.secret, pid)
def __generate_rc(self, access, secret, pid):
"""Generate rc file for user"""
rc = open(FLAGS.credentials_template).read()
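With the Vpn class removed from auth.manager, VPN addressing now comes from nova.network.vpn.NetworkData, and get_project_vpn_data raises NotFound instead of returning (None, None). A hedged sketch of that lookup pattern; NotFound and NetworkData are stubbed here since the snippet does not import nova:

class NotFound(Exception):
    """Stand-in for nova.exception.NotFound in this sketch."""
    pass

class FakeNetworkData(object):
    """Stand-in for nova.network.vpn.NetworkData, backed by a plain dict."""
    _store = {"testproj": ("10.0.0.2", 1000)}

    def __init__(self, ip, port):
        self.ip, self.port = ip, port

    @classmethod
    def lookup(cls, project_id):
        entry = cls._store.get(project_id)
        return cls(*entry) if entry else None

def get_project_vpn_data(project_id):
    """Mirror the new behavior: raise NotFound instead of returning (None, None)."""
    network_data = FakeNetworkData.lookup(project_id)
    if not network_data:
        raise NotFound('project network data has not been set')
    return network_data.ip, network_data.port

print(get_project_vpn_data("testproj"))    # ('10.0.0.2', 1000)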

View File

@@ -90,13 +90,15 @@ class BasicModel(object):
@absorb_connection_error
def __init__(self):
self.initial_state = {}
self.state = Redis.instance().hgetall(self.__redis_key)
if self.state:
self.initial_state = self.state
state = Redis.instance().hgetall(self.__redis_key)
if state:
self.initial_state = state
self.state = dict(self.initial_state)
else:
self.initial_state = {}
self.state = self.default_state()
def default_state(self):
"""You probably want to define this in your subclass"""
return {}
@@ -239,7 +241,7 @@ class BasicModel(object):
for key, val in self.state.iteritems():
Redis.instance().hset(self.__redis_key, key, val)
self.add_to_index()
self.initial_state = self.state
self.initial_state = dict(self.state)
return True
@absorb_connection_error
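The datastore change above is an aliasing fix: previously self.state and self.initial_state referenced the same dict, so mutations could never be told apart from the loaded snapshot. A minimal standalone illustration of why the dict() copies matter (plain dicts, no Redis):

# old __init__/save: state and initial_state were the very same dict
loaded = {"memory_mb": "512"}
state = loaded
initial_state = state
state["memory_mb"] = "1024"
print(state == initial_state)    # True -- the "snapshot" changed too

# new __init__/save: take independent copies, as the dict() calls above do
loaded = {"memory_mb": "512"}
initial_state = dict(loaded)
state = dict(initial_state)
state["memory_mb"] = "1024"
print(state == initial_state)    # False -- pending changes are now detectable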

View File

@@ -36,11 +36,11 @@ from nova import utils
from nova.auth import rbac
from nova.auth import manager
from nova.compute import model
from nova.compute import network
from nova.compute.instance_types import INSTANCE_TYPES
from nova.compute import service as compute_service
from nova.endpoint import images
from nova.volume import service as volume_service
from nova.network import service as network_service
from nova.network import model as network_model
from nova.volume import service
FLAGS = flags.FLAGS
@@ -64,7 +64,6 @@ class CloudController(object):
"""
def __init__(self):
self.instdir = model.InstanceDirectory()
self.network = network.PublicNetworkController()
self.setup()
@property
@@ -76,7 +75,7 @@ class CloudController(object):
def volumes(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers("volumes"):
volume = volume_service.get_volume(volume_id)
volume = service.get_volume(volume_id)
yield volume
def __str__(self):
@@ -222,7 +221,7 @@ class CloudController(object):
callback=_complete)
return d
except users.UserError, e:
except manager.UserError as e:
raise
@rbac.allow('all')
@@ -308,7 +307,7 @@ class CloudController(object):
def _get_address(self, context, public_ip):
# FIXME(vish) this should move into network.py
address = self.network.get_host(public_ip)
address = network_model.PublicAddress.lookup(public_ip)
if address and (context.user.is_admin() or address['project_id'] == context.project.id):
return address
raise exception.NotFound("Address at ip %s not found" % public_ip)
@@ -330,7 +329,7 @@ class CloudController(object):
raise exception.NotFound('Instance %s could not be found' % instance_id)
def _get_volume(self, context, volume_id):
volume = volume_service.get_volume(volume_id)
volume = service.get_volume(volume_id)
if context.user.is_admin() or volume['project_id'] == context.project.id:
return volume
raise exception.NotFound('Volume %s could not be found' % volume_id)
@@ -417,7 +416,7 @@ class CloudController(object):
'code': instance.get('state', 0),
'name': instance.get('state_description', 'pending')
}
i['public_dns_name'] = self.network.get_public_ip_for_instance(
i['public_dns_name'] = network_model.get_public_ip_for_instance(
i['instance_id'])
i['private_dns_name'] = instance.get('private_dns_name', None)
if not i['public_dns_name']:
@@ -452,10 +451,10 @@ class CloudController(object):
def format_addresses(self, context):
addresses = []
for address in self.network.host_objs:
for address in network_model.PublicAddress.all():
# TODO(vish): implement a by_project iterator for addresses
if (context.user.is_admin() or
address['project_id'] == self.project.id):
address['project_id'] == context.project.id):
address_rv = {
'public_ip': address['address'],
'instance_id' : address.get('instance_id', 'free')
@@ -470,41 +469,63 @@ class CloudController(object):
return {'addressesSet': addresses}
@rbac.allow('netadmin')
@defer.inlineCallbacks
def allocate_address(self, context, **kwargs):
address = self.network.allocate_ip(
context.user.id, context.project.id, 'public')
return defer.succeed({'addressSet': [{'publicIp' : address}]})
network_topic = yield self._get_network_topic(context)
alloc_result = yield rpc.call(network_topic,
{"method": "allocate_elastic_ip",
"args": {"user_id": context.user.id,
"project_id": context.project.id}})
public_ip = alloc_result['result']
defer.returnValue({'addressSet': [{'publicIp' : public_ip}]})
@rbac.allow('netadmin')
@defer.inlineCallbacks
def release_address(self, context, public_ip, **kwargs):
self.network.deallocate_ip(public_ip)
return defer.succeed({'releaseResponse': ["Address released."]})
# NOTE(vish): Should we make sure this works?
network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
{"method": "deallocate_elastic_ip",
"args": {"elastic_ip": public_ip}})
defer.returnValue({'releaseResponse': ["Address released."]})
@rbac.allow('netadmin')
def associate_address(self, context, instance_id, **kwargs):
@defer.inlineCallbacks
def associate_address(self, context, instance_id, public_ip, **kwargs):
instance = self._get_instance(context, instance_id)
self.network.associate_address(
kwargs['public_ip'],
instance['private_dns_name'],
instance_id)
return defer.succeed({'associateResponse': ["Address associated."]})
address = self._get_address(context, public_ip)
network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
{"method": "associate_elastic_ip",
"args": {"elastic_ip": address['address'],
"fixed_ip": instance['private_dns_name'],
"instance_id": instance['instance_id']}})
defer.returnValue({'associateResponse': ["Address associated."]})
@rbac.allow('netadmin')
@defer.inlineCallbacks
def disassociate_address(self, context, public_ip, **kwargs):
address = self._get_address(context, public_ip)
self.network.disassociate_address(public_ip)
# TODO - Strip the IP from the instance
return defer.succeed({'disassociateResponse': ["Address disassociated."]})
network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
{"method": "disassociate_elastic_ip",
"args": {"elastic_ip": address['address']}})
defer.returnValue({'disassociateResponse': ["Address disassociated."]})
def release_ip(self, context, private_ip, **kwargs):
self.network.release_ip(private_ip)
return defer.succeed({'releaseResponse': ["Address released."]})
def lease_ip(self, context, private_ip, **kwargs):
self.network.lease_ip(private_ip)
return defer.succeed({'leaseResponse': ["Address leased."]})
@defer.inlineCallbacks
def _get_network_topic(self, context):
"""Retrieves the network host for a project"""
host = network_service.get_host_for_project(context.project.id)
if not host:
result = yield rpc.call(FLAGS.network_topic,
{"method": "set_network_host",
"args": {"user_id": context.user.id,
"project_id": context.project.id}})
host = result['result']
defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
def run_instances(self, context, **kwargs):
# make sure user can access the image
# vpn image is private so it doesn't show up on lists
@@ -536,15 +557,20 @@ class CloudController(object):
raise exception.ApiError('Key Pair %s not found' %
kwargs['key_name'])
key_data = key_pair.public_key
network_topic = yield self._get_network_topic(context)
# TODO: Get the real security group of launch in here
security_group = "default"
if FLAGS.simple_network:
bridge_name = FLAGS.simple_network_bridge
else:
net = network.BridgedNetwork.get_network_for_project(
context.user.id, context.project.id, security_group)
bridge_name = net['bridge_name']
for num in range(int(kwargs['max_count'])):
vpn = False
if image_id == FLAGS.vpn_image_id:
vpn = True
allocate_result = yield rpc.call(network_topic,
{"method": "allocate_fixed_ip",
"args": {"user_id": context.user.id,
"project_id": context.project.id,
"security_group": security_group,
"vpn": vpn}})
allocate_data = allocate_result['result']
inst = self.instdir.new()
inst['image_id'] = image_id
inst['kernel_id'] = kernel_id
@@ -557,24 +583,11 @@ class CloudController(object):
inst['key_name'] = kwargs.get('key_name', '')
inst['user_id'] = context.user.id
inst['project_id'] = context.project.id
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = num
inst['bridge_name'] = bridge_name
if FLAGS.simple_network:
address = network.allocate_simple_ip()
else:
if inst['image_id'] == FLAGS.vpn_image_id:
address = network.allocate_vpn_ip(
inst['user_id'],
inst['project_id'],
mac=inst['mac_address'])
else:
address = network.allocate_ip(
inst['user_id'],
inst['project_id'],
mac=inst['mac_address'])
inst['private_dns_name'] = str(address)
# TODO: allocate expresses on the router node
inst['security_group'] = security_group
for (key, value) in allocate_data.iteritems():
inst[key] = value
inst.save()
rpc.cast(FLAGS.compute_topic,
{"method": "run_instance",
@@ -582,40 +595,49 @@ class CloudController(object):
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
# TODO: Make Network figure out the network name from ip.
return defer.succeed(self._format_instances(
context, reservation_id))
defer.returnValue(self._format_instances(context, reservation_id))
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
def terminate_instances(self, context, instance_id, **kwargs):
logging.debug("Going to start terminating instances")
network_topic = yield self._get_network_topic(context)
for i in instance_id:
logging.debug("Going to try and terminate %s" % i)
try:
instance = self._get_instance(context, i)
except exception.NotFound:
logging.warning("Instance %s was not found during terminate" % i)
logging.warning("Instance %s was not found during terminate"
% i)
continue
try:
self.network.disassociate_address(
instance.get('public_dns_name', 'bork'))
except:
pass
if instance.get('private_dns_name', None):
logging.debug("Deallocating address %s" % instance.get('private_dns_name', None))
if FLAGS.simple_network:
network.deallocate_simple_ip(instance.get('private_dns_name', None))
else:
try:
self.network.deallocate_ip(instance.get('private_dns_name', None))
except Exception, _err:
pass
if instance.get('node_name', 'unassigned') != 'unassigned': #It's also internal default
elastic_ip = network_model.get_public_ip_for_instance(i)
if elastic_ip:
logging.debug("Disassociating address %s" % elastic_ip)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later. Perhaps in the scheduler?
rpc.cast(network_topic,
{"method": "disassociate_elastic_ip",
"args": {"elastic_ip": elastic_ip}})
fixed_ip = instance.get('private_dns_name', None)
if fixed_ip:
logging.debug("Deallocating address %s" % fixed_ip)
# NOTE(vish): Right now we don't really care if the ip is
# actually removed. We may need to worry about
# checking this later. Perhaps in the scheduler?
rpc.cast(network_topic,
{"method": "deallocate_fixed_ip",
"args": {"fixed_ip": fixed_ip}})
if instance.get('node_name', 'unassigned') != 'unassigned':
# NOTE(joshua?): It's also internal default
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "terminate_instance",
"args" : {"instance_id": i}})
{"method": "terminate_instance",
"args": {"instance_id": i}})
else:
instance.destroy()
return defer.succeed(True)
defer.returnValue(True)
@rbac.allow('projectmanager', 'sysadmin')
def reboot_instances(self, context, instance_id, **kwargs):
@@ -677,6 +699,8 @@ class CloudController(object):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
if not 'user_group' in kwargs:
raise exception.ApiError('user or group not specified')
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
raise exception.ApiError('only group "all" is supported')
if not operation_type in ['add', 'remove']:
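The cloud controller now routes elastic and fixed IP work over RPC to a per-host network topic returned by _get_network_topic. A simplified, synchronous sketch of that pattern (the real code uses Twisted deferreds; the rpc call and host name here are stand-ins):

NETWORK_TOPIC = "network"   # stands in for FLAGS.network_topic

def fake_rpc_call(topic, message):
    """Stand-in for nova.rpc.call; always answers with a fixed host name."""
    assert message["method"] == "set_network_host"
    return {"result": "net-host-01"}

def get_network_topic(user_id, project_id, known_host=None):
    """Mirror _get_network_topic: reuse the project's host or ask the service."""
    host = known_host
    if not host:
        result = fake_rpc_call(NETWORK_TOPIC,
                               {"method": "set_network_host",
                                "args": {"user_id": user_id,
                                         "project_id": project_id}})
        host = result["result"]
    return "%s.%s" % (NETWORK_TOPIC, host)

print(get_network_topic("user", "proj"))             # network.net-host-01
print(get_network_topic("user", "proj", "host-7"))   # network.host-7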

View File

@@ -238,12 +238,12 @@ def send_message(topic, message, wait=True):
exchange=msg_id,
auto_delete=True,
exchange_type="direct",
routing_key=msg_id,
durable=False)
routing_key=msg_id)
consumer.register_callback(generic_response)
publisher = messaging.Publisher(connection=Connection.instance(),
exchange=FLAGS.control_exchange,
durable=False,
exchange_type="topic",
routing_key=topic)
publisher.send(message)

View File

@@ -135,10 +135,18 @@ class AuthTestCase(test.BaseTestCase):
self.manager.add_to_project('test2', 'testproj')
self.assertTrue(self.manager.get_project('testproj').has_member('test2'))
def test_208_can_remove_user_from_project(self):
def test_207_can_remove_user_from_project(self):
self.manager.remove_from_project('test2', 'testproj')
self.assertFalse(self.manager.get_project('testproj').has_member('test2'))
def test_208_can_remove_add_user_with_role(self):
self.manager.add_to_project('test2', 'testproj')
self.manager.add_role('test2', 'developer', 'testproj')
self.manager.remove_from_project('test2', 'testproj')
self.assertFalse(self.manager.has_role('test2', 'developer', 'testproj'))
self.manager.add_to_project('test2', 'testproj')
self.manager.remove_from_project('test2', 'testproj')
def test_209_can_generate_x509(self):
# MUST HAVE RUN CLOUD SETUP BY NOW
self.cloud = cloud.CloudController()
@@ -179,20 +187,6 @@ class AuthTestCase(test.BaseTestCase):
self.manager.remove_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
def test_212_vpn_ip_and_port_looks_valid(self):
project = self.manager.get_project('testproj')
self.assert_(project.vpn_ip)
self.assert_(project.vpn_port >= FLAGS.vpn_start_port)
self.assert_(project.vpn_port <= FLAGS.vpn_end_port)
def test_213_too_many_vpns(self):
vpns = []
for i in xrange(manager.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
vpns.append(manager.Vpn.create("vpnuser%s" % i))
self.assertRaises(manager.NoMorePorts, manager.Vpn.create, "boom")
for vpn in vpns:
vpn.destroy()
def test_214_can_retrieve_project_by_user(self):
project = self.manager.create_project('testproj2', 'test2', 'Another test project', ['test2'])
self.assert_(len(self.manager.get_projects()) > 1)

View File

@@ -19,9 +19,7 @@
from datetime import datetime, timedelta
import logging
import time
from twisted.internet import defer
from nova import exception
from nova import flags
from nova import test
from nova import utils
@@ -49,9 +47,9 @@ class ModelTestCase(test.TrialTestCase):
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type'] = 'm1.tiny'
inst['node_name'] = FLAGS.node_name
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
inst['private_dns_name'] = '10.0.0.1'
inst.save()
return inst
@@ -71,118 +69,126 @@ class ModelTestCase(test.TrialTestCase):
session_token.save()
return session_token
@defer.inlineCallbacks
def test_create_instance(self):
"""store with create_instace, then test that a load finds it"""
instance = yield self.create_instance()
old = yield model.Instance(instance.identifier)
instance = self.create_instance()
old = model.Instance(instance.identifier)
self.assertFalse(old.is_new_record())
@defer.inlineCallbacks
def test_delete_instance(self):
"""create, then destroy, then make sure loads a new record"""
instance = yield self.create_instance()
yield instance.destroy()
newinst = yield model.Instance('i-test')
instance = self.create_instance()
instance.destroy()
newinst = model.Instance('i-test')
self.assertTrue(newinst.is_new_record())
@defer.inlineCallbacks
def test_instance_added_to_set(self):
"""create, then check that it is listed for the project"""
instance = yield self.create_instance()
"""create, then check that it is listed in global set"""
instance = self.create_instance()
found = False
for x in model.InstanceDirectory().all:
if x.identifier == 'i-test':
found = True
self.assert_(found)
@defer.inlineCallbacks
def test_instance_associates_project(self):
"""create, then check that it is listed for the project"""
instance = yield self.create_instance()
instance = self.create_instance()
found = False
for x in model.InstanceDirectory().by_project(instance.project):
if x.identifier == 'i-test':
found = True
self.assert_(found)
@defer.inlineCallbacks
def test_instance_associates_ip(self):
"""create, then check that it is listed for the ip"""
instance = self.create_instance()
found = False
x = model.InstanceDirectory().by_ip(instance['private_dns_name'])
self.assertEqual(x.identifier, 'i-test')
def test_instance_associates_node(self):
"""create, then check that it is listed for the node_name"""
instance = self.create_instance()
found = False
for x in model.InstanceDirectory().by_node(FLAGS.node_name):
if x.identifier == 'i-test':
found = True
self.assertFalse(found)
instance['node_name'] = 'test_node'
instance.save()
for x in model.InstanceDirectory().by_node('test_node'):
if x.identifier == 'i-test':
found = True
self.assert_(found)
def test_host_class_finds_hosts(self):
host = yield self.create_host()
host = self.create_host()
self.assertEqual('testhost', model.Host.lookup('testhost').identifier)
@defer.inlineCallbacks
def test_host_class_doesnt_find_missing_hosts(self):
rv = yield model.Host.lookup('woahnelly')
rv = model.Host.lookup('woahnelly')
self.assertEqual(None, rv)
@defer.inlineCallbacks
def test_create_host(self):
"""store with create_host, then test that a load finds it"""
host = yield self.create_host()
old = yield model.Host(host.identifier)
host = self.create_host()
old = model.Host(host.identifier)
self.assertFalse(old.is_new_record())
@defer.inlineCallbacks
def test_delete_host(self):
"""create, then destroy, then make sure loads a new record"""
instance = yield self.create_host()
yield instance.destroy()
newinst = yield model.Host('testhost')
instance = self.create_host()
instance.destroy()
newinst = model.Host('testhost')
self.assertTrue(newinst.is_new_record())
@defer.inlineCallbacks
def test_host_added_to_set(self):
"""create, then check that it is included in list"""
instance = yield self.create_host()
instance = self.create_host()
found = False
for x in model.Host.all():
if x.identifier == 'testhost':
found = True
self.assert_(found)
@defer.inlineCallbacks
def test_create_daemon_two_args(self):
"""create a daemon with two arguments"""
d = yield self.create_daemon()
d = self.create_daemon()
d = model.Daemon('testhost', 'nova-testdaemon')
self.assertFalse(d.is_new_record())
@defer.inlineCallbacks
def test_create_daemon_single_arg(self):
"""Create a daemon using the combined host:bin format"""
d = yield model.Daemon("testhost:nova-testdaemon")
d = model.Daemon("testhost:nova-testdaemon")
d.save()
d = model.Daemon('testhost:nova-testdaemon')
self.assertFalse(d.is_new_record())
@defer.inlineCallbacks
def test_equality_of_daemon_single_and_double_args(self):
"""Create a daemon using the combined host:bin arg, find with 2"""
d = yield model.Daemon("testhost:nova-testdaemon")
d = model.Daemon("testhost:nova-testdaemon")
d.save()
d = model.Daemon('testhost', 'nova-testdaemon')
self.assertFalse(d.is_new_record())
@defer.inlineCallbacks
def test_equality_daemon_of_double_and_single_args(self):
"""Create a daemon using the combined host:bin arg, find with 2"""
d = yield self.create_daemon()
d = self.create_daemon()
d = model.Daemon('testhost:nova-testdaemon')
self.assertFalse(d.is_new_record())
@defer.inlineCallbacks
def test_delete_daemon(self):
"""create, then destroy, then make sure loads a new record"""
instance = yield self.create_daemon()
yield instance.destroy()
newinst = yield model.Daemon('testhost', 'nova-testdaemon')
instance = self.create_daemon()
instance.destroy()
newinst = model.Daemon('testhost', 'nova-testdaemon')
self.assertTrue(newinst.is_new_record())
@defer.inlineCallbacks
def test_daemon_heartbeat(self):
"""Create a daemon, sleep, heartbeat, check for update"""
d = yield self.create_daemon()
d = self.create_daemon()
ts = d['updated_at']
time.sleep(2)
d.heartbeat()
@@ -190,70 +196,62 @@ class ModelTestCase(test.TrialTestCase):
ts2 = d2['updated_at']
self.assert_(ts2 > ts)
@defer.inlineCallbacks
def test_daemon_added_to_set(self):
"""create, then check that it is included in list"""
instance = yield self.create_daemon()
instance = self.create_daemon()
found = False
for x in model.Daemon.all():
if x.identifier == 'testhost:nova-testdaemon':
found = True
self.assert_(found)
@defer.inlineCallbacks
def test_daemon_associates_host(self):
"""create, then check that it is listed for the host"""
instance = yield self.create_daemon()
instance = self.create_daemon()
found = False
for x in model.Daemon.by_host('testhost'):
if x.identifier == 'testhost:nova-testdaemon':
found = True
self.assertTrue(found)
@defer.inlineCallbacks
def test_create_session_token(self):
"""create"""
d = yield self.create_session_token()
d = self.create_session_token()
d = model.SessionToken(d.token)
self.assertFalse(d.is_new_record())
@defer.inlineCallbacks
def test_delete_session_token(self):
"""create, then destroy, then make sure loads a new record"""
instance = yield self.create_session_token()
yield instance.destroy()
newinst = yield model.SessionToken(instance.token)
instance = self.create_session_token()
instance.destroy()
newinst = model.SessionToken(instance.token)
self.assertTrue(newinst.is_new_record())
@defer.inlineCallbacks
def test_session_token_added_to_set(self):
"""create, then check that it is included in list"""
instance = yield self.create_session_token()
instance = self.create_session_token()
found = False
for x in model.SessionToken.all():
if x.identifier == instance.token:
found = True
self.assert_(found)
@defer.inlineCallbacks
def test_session_token_associates_user(self):
"""create, then check that it is listed for the user"""
instance = yield self.create_session_token()
instance = self.create_session_token()
found = False
for x in model.SessionToken.associated_to('user', 'testuser'):
if x.identifier == instance.identifier:
found = True
self.assertTrue(found)
@defer.inlineCallbacks
def test_session_token_generation(self):
instance = yield model.SessionToken.generate('username', 'TokenType')
instance = model.SessionToken.generate('username', 'TokenType')
self.assertFalse(instance.is_new_record())
@defer.inlineCallbacks
def test_find_generated_session_token(self):
instance = yield model.SessionToken.generate('username', 'TokenType')
found = yield model.SessionToken.lookup(instance.identifier)
instance = model.SessionToken.generate('username', 'TokenType')
found = model.SessionToken.lookup(instance.identifier)
self.assert_(found)
def test_update_session_token_expiry(self):
@@ -264,34 +262,29 @@ class ModelTestCase(test.TrialTestCase):
expiry = utils.parse_isotime(instance['expiry'])
self.assert_(expiry > datetime.utcnow())
@defer.inlineCallbacks
def test_session_token_lookup_when_expired(self):
instance = yield model.SessionToken.generate("testuser")
instance = model.SessionToken.generate("testuser")
instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
instance.save()
inst = model.SessionToken.lookup(instance.identifier)
self.assertFalse(inst)
@defer.inlineCallbacks
def test_session_token_lookup_when_not_expired(self):
instance = yield model.SessionToken.generate("testuser")
instance = model.SessionToken.generate("testuser")
inst = model.SessionToken.lookup(instance.identifier)
self.assert_(inst)
@defer.inlineCallbacks
def test_session_token_is_expired_when_expired(self):
instance = yield model.SessionToken.generate("testuser")
instance = model.SessionToken.generate("testuser")
instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
self.assert_(instance.is_expired())
@defer.inlineCallbacks
def test_session_token_is_expired_when_not_expired(self):
instance = yield model.SessionToken.generate("testuser")
instance = model.SessionToken.generate("testuser")
self.assertFalse(instance.is_expired())
@defer.inlineCallbacks
def test_session_token_ttl(self):
instance = yield model.SessionToken.generate("testuser")
instance = model.SessionToken.generate("testuser")
now = datetime.utcnow()
delta = timedelta(hours=1)
instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)

View File

@@ -24,8 +24,10 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import network
from nova.compute.exception import NoMoreAddresses
from nova.network import model
from nova.network import service
from nova.network import vpn
from nova.network.exception import NoMoreAddresses
FLAGS = flags.FLAGS
@@ -52,7 +54,8 @@ class NetworkTestCase(test.TrialTestCase):
self.projects.append(self.manager.create_project(name,
'netuser',
name))
self.network = network.PublicNetworkController()
self.network = model.PublicNetworkController()
self.service = service.VlanNetworkService()
def tearDown(self):
super(NetworkTestCase, self).tearDown()
@@ -66,16 +69,17 @@ class NetworkTestCase(test.TrialTestCase):
self.assertTrue(IPy.IP(address) in pubnet)
self.assertTrue(IPy.IP(address) in self.network.network)
def test_allocate_deallocate_ip(self):
address = network.allocate_ip(
self.user.id, self.projects[0].id, utils.generate_mac())
def test_allocate_deallocate_fixed_ip(self):
result = yield self.service.allocate_fixed_ip(
self.user.id, self.projects[0].id)
address = result['private_dns_name']
mac = result['mac_address']
logging.debug("Was allocated %s" % (address))
net = network.get_project_network(self.projects[0].id, "default")
net = model.get_project_network(self.projects[0].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
mac = utils.generate_mac()
hostname = "test-host"
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
rv = network.deallocate_ip(address)
rv = self.service.deallocate_fixed_ip(address)
# Doesn't go away until it's dhcp released
self.assertEqual(True, is_in_project(address, self.projects[0].id))
@@ -84,15 +88,18 @@ class NetworkTestCase(test.TrialTestCase):
self.assertEqual(False, is_in_project(address, self.projects[0].id))
def test_range_allocation(self):
mac = utils.generate_mac()
secondmac = utils.generate_mac()
hostname = "test-host"
address = network.allocate_ip(
self.user.id, self.projects[0].id, mac)
secondaddress = network.allocate_ip(
self.user, self.projects[1].id, secondmac)
net = network.get_project_network(self.projects[0].id, "default")
secondnet = network.get_project_network(self.projects[1].id, "default")
result = yield self.service.allocate_fixed_ip(
self.user.id, self.projects[0].id)
mac = result['mac_address']
address = result['private_dns_name']
result = yield self.service.allocate_fixed_ip(
self.user, self.projects[1].id)
secondmac = result['mac_address']
secondaddress = result['private_dns_name']
net = model.get_project_network(self.projects[0].id, "default")
secondnet = model.get_project_network(self.projects[1].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
@@ -103,46 +110,64 @@ class NetworkTestCase(test.TrialTestCase):
self.dnsmasq.issue_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
rv = network.deallocate_ip(address)
rv = self.service.deallocate_fixed_ip(address)
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
rv = network.deallocate_ip(secondaddress)
rv = self.service.deallocate_fixed_ip(secondaddress)
self.dnsmasq.release_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id))
def test_subnet_edge(self):
secondaddress = network.allocate_ip(self.user.id, self.projects[0].id,
utils.generate_mac())
result = yield self.service.allocate_fixed_ip(self.user.id,
self.projects[0].id)
firstaddress = result['private_dns_name']
hostname = "toomany-hosts"
for i in range(1,5):
project_id = self.projects[i].id
mac = utils.generate_mac()
mac2 = utils.generate_mac()
mac3 = utils.generate_mac()
address = network.allocate_ip(
self.user, project_id, mac)
address2 = network.allocate_ip(
self.user, project_id, mac2)
address3 = network.allocate_ip(
self.user, project_id, mac3)
result = yield self.service.allocate_fixed_ip(
self.user, project_id)
mac = result['mac_address']
address = result['private_dns_name']
result = yield self.service.allocate_fixed_ip(
self.user, project_id)
mac2 = result['mac_address']
address2 = result['private_dns_name']
result = yield self.service.allocate_fixed_ip(
self.user, project_id)
mac3 = result['mac_address']
address3 = result['private_dns_name']
self.assertEqual(False, is_in_project(address, self.projects[0].id))
self.assertEqual(False, is_in_project(address2, self.projects[0].id))
self.assertEqual(False, is_in_project(address3, self.projects[0].id))
rv = network.deallocate_ip(address)
rv = network.deallocate_ip(address2)
rv = network.deallocate_ip(address3)
net = network.get_project_network(project_id, "default")
rv = self.service.deallocate_fixed_ip(address)
rv = self.service.deallocate_fixed_ip(address2)
rv = self.service.deallocate_fixed_ip(address3)
net = model.get_project_network(project_id, "default")
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
net = network.get_project_network(self.projects[0].id, "default")
rv = network.deallocate_ip(secondaddress)
self.dnsmasq.release_ip(mac, secondaddress, hostname, net.bridge_name)
net = model.get_project_network(self.projects[0].id, "default")
rv = self.service.deallocate_fixed_ip(firstaddress)
self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name)
def test_212_vpn_ip_and_port_looks_valid(self):
vpn.NetworkData.create(self.projects[0].id)
self.assert_(self.projects[0].vpn_ip)
self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port)
self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port)
def test_too_many_vpns(self):
vpns = []
for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
vpns.append(vpn.NetworkData.create("vpnuser%s" % i))
self.assertRaises(vpn.NoMorePorts, vpn.NetworkData.create, "boom")
for network_datum in vpns:
network_datum.destroy()
def test_release_before_deallocate(self):
pass
@@ -169,7 +194,7 @@ class NetworkTestCase(test.TrialTestCase):
NUM_RESERVED_VPN_IPS)
usable addresses
"""
net = network.get_project_network(self.projects[0].id, "default")
net = model.get_project_network(self.projects[0].id, "default")
# Determine expected number of available IP addresses
num_static_ips = net.num_static_ips
@@ -183,22 +208,23 @@ class NetworkTestCase(test.TrialTestCase):
macs = {}
addresses = {}
for i in range(0, (num_available_ips - 1)):
macs[i] = utils.generate_mac()
addresses[i] = network.allocate_ip(self.user.id, self.projects[0].id, macs[i])
result = yield self.service.allocate_fixed_ip(self.user.id, self.projects[0].id)
macs[i] = result['mac_address']
addresses[i] = result['private_dns_name']
self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
self.assertRaises(NoMoreAddresses, network.allocate_ip, self.user.id, self.projects[0].id, utils.generate_mac())
self.assertFailure(self.service.allocate_fixed_ip(self.user.id, self.projects[0].id), NoMoreAddresses)
for i in range(0, (num_available_ips - 1)):
rv = network.deallocate_ip(addresses[i])
rv = self.service.deallocate_fixed_ip(addresses[i])
self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
def is_in_project(address, project_id):
return address in network.get_project_network(project_id).list_addresses()
return address in model.get_project_network(project_id).list_addresses()
def _get_project_addresses(project_id):
project_addresses = []
for addr in network.get_project_network(project_id).list_addresses():
for addr in model.get_project_network(project_id).list_addresses():
project_addresses.append(addr)
return project_addresses
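The rewritten network tests above expect allocate_fixed_ip to return a dict of network fields rather than a bare address. A short sketch of consuming that result, with the service call stubbed and only the keys the tests actually read:

def fake_allocate_fixed_ip(user_id, project_id):
    """Stand-in returning only the keys the tests above actually read."""
    return {"mac_address": "02:16:3e:00:00:01",
            "private_dns_name": "10.0.0.5"}

result = fake_allocate_fixed_ip("netuser", "testproj")
mac = result["mac_address"]
address = result["private_dns_name"]
print(mac, address)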