Proposing merge to get feedback on the ORM refactoring. I am very interested in feedback on all of these changes.

This is a huge set of changes that touches almost all of the files. I'm sure I have broken quite a bit, but better to take the plunge now than to postpone this until later. The idea is to allow for pluggable backends throughout the code.

Brief Overview

For compute/volume/network, there are multiple classes:

* service - responsible for rpc. This currently uses the existing cast and call in rpc.py and a little bit of magic to call public methods on the manager class (sketched at the end of this description). Each service also reports its state into the database every 10 seconds.
* manager - responsible for managing the respective object classes. All the business logic for the classes goes here.
* db (db_driver) - responsible for abstracting database access.
* driver (domain_driver) - responsible for executing the actual shell commands and implementation.

Compute hasn't been fully cleaned up, but to get an idea of how it works, take a look at volume and network.

Known issues/Things to be done:

* nova-api accesses db objects directly. It seems cleaner to have only the managers dealing with their respective objects. This would mean the code for 'run_instances' would move into the manager class, and it would do the initial setup and cast out to the remote service.
* db code uses flat methods to define its interface. In my mind this is a little prettier as an abstract base class, but the driver loading code can load a module or a class (see the second sketch below). It works, so I'm not sure it needs to be changed, but feel free to debate it.
* Service classes have no code in them. Not sure if this is a problem for people, but the magic of calling the manager's methods is done in the base class. We could remove the magic from the base class and explicitly wrap the methods that we want to make available via rpc if this seems nasty.
* AuthManager Projects/Users/Roles are not integrated into this system. In order for everything to live happily in the backend, we need some type of adaptor for LDAP.
* Context is not passed properly across rabbit. Context should probably be changed to a simple dictionary so that it can be passed properly through the queue.
* No authorization checks on access to objects. We need to decide at which layer auth checks should happen.
* Some of the methods in ComputeManager need to be moved into other layers/managers.
* The compute driver layer should be abstracted more cleanly.
* Flat networking is untested and may need to be reworked.
* Some of the api commands are not working yet.
* Nova Swift authentication needs to be refactored (Todd is working on this).
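The service/manager layering described above is the heart of this branch. Here is a minimal sketch of the idea; Service, ExampleManager, FakeDb, and FakeDriver are hypothetical stand-ins for illustration, not the actual classes in this branch:

# --- illustrative sketch, not part of the diff below ---


class FakeDb(object):
    """db layer: abstracts database access behind flat methods."""
    def __init__(self):
        self.volumes = {}

    def volume_create(self, context, values):
        volume_id = len(self.volumes) + 1
        self.volumes[volume_id] = dict(values, id=volume_id)
        return self.volumes[volume_id]


class FakeDriver(object):
    """driver layer: stands in for the actual shell commands."""
    def create_volume(self, size):
        print "pretending to run lvcreate for %sG" % size


class ExampleManager(object):
    """manager layer: all of the business logic lives here."""
    def __init__(self, db, driver):
        self.db = db
        self.driver = driver

    def create_volume(self, context, size):
        volume_ref = self.db.volume_create(context, {'size': size})
        self.driver.create_volume(size)
        return volume_ref


class Service(object):
    """service layer: handles rpc and delegates public methods to the
    manager; this delegation is the 'magic' mentioned above."""
    def __init__(self, manager):
        self.manager = manager

    def __getattr__(self, key):
        # any method not defined on the service is looked up on the
        # manager, so a message like {"method": "create_volume"} ends
        # up invoking manager code
        return getattr(self.manager, key)


service = Service(ExampleManager(FakeDb(), FakeDriver()))
print service.create_volume(None, 10)
# --- end sketch ---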
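The second sketch shows why the db layer can stay a module of flat functions while managers and drivers are classes: the loader can hand back either a module or a class instance. This is a simplified assumption of what utils.import_class/import_object do, not a verbatim copy:

# --- illustrative sketch, not part of the diff below ---
import sys


def import_class(import_str):
    """Return a class from a dotted string like 'pkg.module.ClassName'."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    return getattr(sys.modules[mod_str], class_str)


def import_object(import_str):
    """Return a module if import_str names one, otherwise an instance
    of the named class."""
    try:
        __import__(import_str)
        return sys.modules[import_str]
    except ImportError:
        return import_class(import_str)()
# --- end sketch ---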
@@ -32,12 +32,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

from nova import service
from nova import twistd
from nova.compute import service


if __name__ == '__main__':
    twistd.serve(__file__)

if __name__ == '__builtin__':
    application = service.ComputeService.create()  # pylint: disable-msg=C0103
    application = service.Service.create()  # pylint: disable=C0103
@@ -33,24 +33,32 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

from nova import db
from nova import flags
from nova import rpc
from nova import utils
from nova.network import linux_net
from nova.network import model
from nova.network import service

FLAGS = flags.FLAGS
flags.DECLARE('auth_driver', 'nova.auth.manager')
flags.DECLARE('redis_db', 'nova.datastore')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')


def add_lease(_mac, ip_address, _hostname, _interface):
def add_lease(mac, ip_address, _hostname, _interface):
    """Set the IP that was assigned by the DHCP server."""
    if FLAGS.fake_rabbit:
        service.VlanNetworkService().lease_ip(ip_address)
        logging.debug("leasing ip")
        network_manager = utils.import_object(FLAGS.network_manager)
        network_manager.lease_fixed_ip(None, mac, ip_address)
    else:
        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
                 {"method": "lease_ip",
                  "args": {"fixed_ip": ip_address}})
        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host),
                 {"method": "lease_fixed_ip",
                  "args": {"context": None,
                           "mac": mac,
                           "address": ip_address}})


def old_lease(_mac, _ip_address, _hostname, _interface):
@@ -58,23 +66,24 @@ def old_lease(_mac, _ip_address, _hostname, _interface):
    logging.debug("Adopted old lease or got a change of mac/hostname")


def del_lease(_mac, ip_address, _hostname, _interface):
def del_lease(mac, ip_address, _hostname, _interface):
    """Called when a lease expires."""
    if FLAGS.fake_rabbit:
        service.VlanNetworkService().release_ip(ip_address)
        logging.debug("releasing ip")
        network_manager = utils.import_object(FLAGS.network_manager)
        network_manager.release_fixed_ip(None, mac, ip_address)
    else:
        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
                 {"method": "release_ip",
                  "args": {"fixed_ip": ip_address}})
        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host),
                 {"method": "release_fixed_ip",
                  "args": {"context": None,
                           "mac": mac,
                           "address": ip_address}})


def init_leases(interface):
    """Get the list of hosts for an interface."""
    net = model.get_network_by_interface(interface)
    res = ""
    for address in net.assigned_objs:
        res += "%s\n" % linux_net.host_dhcp(address)
    return res
    network_ref = db.network_get_by_bridge(None, interface)
    return linux_net.get_dhcp_hosts(None, network_ref['id'])


def main():
@@ -86,10 +95,16 @@ def main():
    if int(os.environ.get('TESTING', '0')):
        FLAGS.fake_rabbit = True
        FLAGS.redis_db = 8
        FLAGS.network_size = 32
        FLAGS.network_size = 16
        FLAGS.connection_type = 'fake'
        FLAGS.fake_network = True
        FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
        FLAGS.num_networks = 5
        path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            '..',
                                            '_trial_temp',
                                            'nova.sqlite'))
        FLAGS.sql_connection = 'sqlite:///%s' % path
    action = argv[1]
    if action in ['add', 'del', 'old']:
        mac = argv[2]
@@ -57,6 +57,8 @@ import os
import sys
import time

import IPy

# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -65,10 +67,10 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

from nova import db
from nova import flags
from nova import utils
from nova.auth import manager
from nova.compute import model
from nova.cloudpipe import pipelib
from nova.endpoint import cloud

@@ -81,7 +83,6 @@ class VpnCommands(object):

    def __init__(self):
        self.manager = manager.AuthManager()
        self.instdir = model.InstanceDirectory()
        self.pipe = pipelib.CloudPipe(cloud.CloudController())

    def list(self):
@@ -96,8 +97,8 @@ class VpnCommands(object):
            vpn = self._vpn_for(project.id)
            if vpn:
                command = "ping -c1 -w1 %s > /dev/null; echo $?"
                out, _err = utils.execute( command % vpn['private_dns_name'],
                                          check_exit_code=False)
                out, _err = utils.execute(command % vpn['private_dns_name'],
                                          check_exit_code=False)
                if out.strip() == '0':
                    net = 'up'
                else:
@@ -113,9 +114,8 @@ class VpnCommands(object):

    def _vpn_for(self, project_id):
        """Get the VPN instance for a project ID."""
        for instance in self.instdir.all:
            if ('image_id' in instance.state
                and instance['image_id'] == FLAGS.vpn_image_id
        for instance in db.instance_get_all():
            if (instance['image_id'] == FLAGS.vpn_image_id
                and not instance['state_description'] in
                    ['shutting_down', 'shutdown']
                and instance['project_id'] == project_id):
@@ -274,6 +274,37 @@ class ProjectCommands(object):
        with open(filename, 'w') as f:
            f.write(zip_file)

class FloatingIpCommands(object):
    """Class for managing floating ip."""

    def create(self, host, range):
        """Creates floating ips for host by range
        arguments: host ip_range"""
        for address in IPy.IP(range):
            db.floating_ip_create(None, {'address': str(address),
                                         'host': host})

    def delete(self, ip_range):
        """Deletes floating ips by range
        arguments: range"""
        for address in IPy.IP(ip_range):
            db.floating_ip_destroy(None, str(address))


    def list(self, host=None):
        """Lists all floating ips (optionally by host)
        arguments: [host]"""
        if host == None:
            floating_ips = db.floating_ip_get_all(None)
        else:
            floating_ips = db.floating_ip_get_all_by_host(None, host)
        for floating_ip in floating_ips:
            instance = None
            if floating_ip['fixed_ip']:
                instance = floating_ip['fixed_ip']['instance']['str_id']
            print "%s\t%s\t%s" % (floating_ip['host'],
                                  floating_ip['address'],
                                  instance)

CATEGORIES = [
    ('user', UserCommands),
@@ -281,6 +312,7 @@ CATEGORIES = [
    ('role', RoleCommands),
    ('shell', ShellCommands),
    ('vpn', VpnCommands),
    ('floating', FloatingIpCommands)
]
@@ -32,17 +32,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

from nova import flags
from nova import service
from nova import twistd

from nova.network import service

FLAGS = flags.FLAGS


if __name__ == '__main__':
    twistd.serve(__file__)

if __name__ == '__builtin__':
    # pylint: disable-msg=C0103
    application = service.type_to_class(FLAGS.network_type).create()
    application = service.Service.create()  # pylint: disable-msg=C0103

@@ -46,4 +46,4 @@ if __name__ == '__main__':

if __name__ == '__builtin__':
    utils.default_flagfile()
    application = handler.get_application()  # pylint: disable-msg=C0103
    application = handler.get_application()  # pylint: disable-msg=C0103

@@ -32,12 +32,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

from nova import service
from nova import twistd
from nova.volume import service


if __name__ == '__main__':
    twistd.serve(__file__)

if __name__ == '__builtin__':
    application = service.VolumeService.create()  # pylint: disable-msg=C0103
    application = service.Service.create()  # pylint: disable-msg=C0103
@@ -29,11 +29,11 @@ import uuid
import zipfile

from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import utils
from nova.auth import signer
from nova.network import vpn


FLAGS = flags.FLAGS
@@ -252,6 +252,7 @@ class AuthManager(object):
        __init__ is run every time AuthManager() is called, so we only
        reset the driver if it is not set or a new driver is specified.
        """
        self.network_manager = utils.import_object(FLAGS.network_manager)
        if driver or not getattr(self, 'driver', None):
            self.driver = utils.import_class(driver or FLAGS.auth_driver)

@@ -493,8 +494,8 @@ class AuthManager(object):
            return []
        return [Project(**project_dict) for project_dict in project_list]

    def create_project(self, name, manager_user,
                       description=None, member_users=None):
    def create_project(self, name, manager_user, description=None,
                       member_users=None, context=None):
        """Create a project

        @type name: str
@@ -523,7 +524,14 @@ class AuthManager(object):
                                             description,
                                             member_users)
            if project_dict:
                return Project(**project_dict)
                project = Project(**project_dict)
                try:
                    self.network_manager.allocate_network(context,
                                                          project.id)
                except:
                    drv.delete_project(project.id)
                    raise
                return project

    def add_to_project(self, user, project):
        """Add user to project"""
@@ -550,7 +558,7 @@ class AuthManager(object):
                                   Project.safe_id(project))

    @staticmethod
    def get_project_vpn_data(project):
    def get_project_vpn_data(project, context=None):
        """Gets vpn ip and port for project

        @type project: Project or project_id
@@ -560,15 +568,26 @@ class AuthManager(object):
        @return: A tuple containing (ip, port) or None, None if vpn has
                 not been allocated for user.
        """
        network_data = vpn.NetworkData.lookup(Project.safe_id(project))
        if not network_data:
            raise exception.NotFound('project network data has not been set')
        return (network_data.ip, network_data.port)

    def delete_project(self, project):
        network_ref = db.project_get_network(context,
                                             Project.safe_id(project))

        if not network_ref['vpn_public_port']:
            raise exception.NotFound('project network data has not been set')
        return (network_ref['vpn_public_address'],
                network_ref['vpn_public_port'])

    def delete_project(self, project, context=None):
        """Deletes a project"""
        try:
            network_ref = db.project_get_network(context,
                                                 Project.safe_id(project))
            db.network_destroy(context, network_ref['id'])
        except:
            logging.exception('Could not destroy network for %s',
                              project)
        with self.driver() as drv:
            return drv.delete_project(Project.safe_id(project))
            drv.delete_project(Project.safe_id(project))

    def get_user(self, uid):
        """Retrieves a user by id"""
@@ -703,15 +722,15 @@ class AuthManager(object):
            zippy.writestr(FLAGS.credential_key_file, private_key)
            zippy.writestr(FLAGS.credential_cert_file, signed_cert)

            network_data = vpn.NetworkData.lookup(pid)
            if network_data:
            (vpn_ip, vpn_port) = self.get_project_vpn_data(project)
            if vpn_ip:
                configfile = open(FLAGS.vpn_client_template, "r")
                s = string.Template(configfile.read())
                configfile.close()
                config = s.substitute(keyfile=FLAGS.credential_key_file,
                                      certfile=FLAGS.credential_cert_file,
                                      ip=network_data.ip,
                                      port=network_data.port)
                                      ip=vpn_ip,
                                      port=vpn_port)
                zippy.writestr(FLAGS.credential_vpn_file, config)
            else:
                logging.warn("No vpn data for project %s" %
@@ -26,10 +26,7 @@ before trying to run this.
import logging
import redis

from nova import exception
from nova import flags
from nova import utils


FLAGS = flags.FLAGS
flags.DEFINE_string('redis_host', '127.0.0.1',
@@ -54,209 +51,3 @@ class Redis(object):
        return cls._instance


class ConnectionError(exception.Error):
    pass


def absorb_connection_error(fn):
    def _wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except redis.exceptions.ConnectionError, ce:
            raise ConnectionError(str(ce))
    return _wrapper


class BasicModel(object):
    """
    All Redis-backed data derives from this class.

    You MUST specify an identifier() property that returns a unique string
    per instance.

    You MUST have an initializer that takes a single argument that is a value
    returned by identifier() to load a new class with.

    You may want to specify a dictionary for default_state().

    You may also specify override_type at the class left to use a key other
    than __class__.__name__.

    You override save and destroy calls to automatically build and destroy
    associations.
    """

    override_type = None

    @absorb_connection_error
    def __init__(self):
        state = Redis.instance().hgetall(self.__redis_key)
        if state:
            self.initial_state = state
            self.state = dict(self.initial_state)
        else:
            self.initial_state = {}
            self.state = self.default_state()


    def default_state(self):
        """You probably want to define this in your subclass"""
        return {}

    @classmethod
    def _redis_name(cls):
        return cls.override_type or cls.__name__.lower()

    @classmethod
    def lookup(cls, identifier):
        rv = cls(identifier)
        if rv.is_new_record():
            return None
        else:
            return rv

    @classmethod
    @absorb_connection_error
    def all(cls):
        """yields all objects in the store"""
        redis_set = cls._redis_set_name(cls.__name__)
        for identifier in Redis.instance().smembers(redis_set):
            yield cls(identifier)

    @classmethod
    def associated_to(cls, foreign_type, foreign_id):
        for identifier in cls.associated_keys(foreign_type, foreign_id):
            yield cls(identifier)

    @classmethod
    @absorb_connection_error
    def associated_keys(cls, foreign_type, foreign_id):
        redis_set = cls._redis_association_name(foreign_type, foreign_id)
        return Redis.instance().smembers(redis_set) or []

    @classmethod
    def _redis_set_name(cls, kls_name):
        # stupidly pluralize (for compatiblity with previous codebase)
        return kls_name.lower() + "s"

    @classmethod
    def _redis_association_name(cls, foreign_type, foreign_id):
        return cls._redis_set_name("%s:%s:%s" %
                                   (foreign_type, foreign_id, cls._redis_name()))

    @property
    def identifier(self):
        """You DEFINITELY want to define this in your subclass"""
        raise NotImplementedError("Your subclass should define identifier")

    @property
    def __redis_key(self):
        return '%s:%s' % (self._redis_name(), self.identifier)

    def __repr__(self):
        return "<%s:%s>" % (self.__class__.__name__, self.identifier)

    def keys(self):
        return self.state.keys()

    def copy(self):
        copyDict = {}
        for item in self.keys():
            copyDict[item] = self[item]
        return copyDict

    def get(self, item, default):
        return self.state.get(item, default)

    def update(self, update_dict):
        return self.state.update(update_dict)

    def setdefault(self, item, default):
        return self.state.setdefault(item, default)

    def __contains__(self, item):
        return item in self.state

    def __getitem__(self, item):
        return self.state[item]

    def __setitem__(self, item, val):
        self.state[item] = val
        return self.state[item]

    def __delitem__(self, item):
        """We don't support this"""
        raise Exception("Silly monkey, models NEED all their properties.")

    def is_new_record(self):
        return self.initial_state == {}

    @absorb_connection_error
    def add_to_index(self):
        """Each insance of Foo has its id tracked int the set named Foos"""
        set_name = self.__class__._redis_set_name(self.__class__.__name__)
        Redis.instance().sadd(set_name, self.identifier)

    @absorb_connection_error
    def remove_from_index(self):
        """Remove id of this instance from the set tracking ids of this type"""
        set_name = self.__class__._redis_set_name(self.__class__.__name__)
        Redis.instance().srem(set_name, self.identifier)

    @absorb_connection_error
    def associate_with(self, foreign_type, foreign_id):
        """Add this class id into the set foreign_type:foreign_id:this_types"""
        # note the extra 's' on the end is for plurality
        # to match the old data without requiring a migration of any sort
        self.add_associated_model_to_its_set(foreign_type, foreign_id)
        redis_set = self.__class__._redis_association_name(foreign_type,
                                                           foreign_id)
        Redis.instance().sadd(redis_set, self.identifier)

    @absorb_connection_error
    def unassociate_with(self, foreign_type, foreign_id):
        """Delete from foreign_type:foreign_id:this_types set"""
        redis_set = self.__class__._redis_association_name(foreign_type,
                                                           foreign_id)
        Redis.instance().srem(redis_set, self.identifier)

    def add_associated_model_to_its_set(self, model_type, model_id):
        """
        When associating an X to a Y, save Y for newer timestamp, etc, and to
        make sure to save it if Y is a new record.
        If the model_type isn't found as a usable class, ignore it, this can
        happen when associating to things stored in LDAP (user, project, ...).
        """
        table = globals()
        klsname = model_type.capitalize()
        if table.has_key(klsname):
            model_class = table[klsname]
            model_inst = model_class(model_id)
            model_inst.save()

    @absorb_connection_error
    def save(self):
        """
        update the directory with the state from this model
        also add it to the index of items of the same type
        then set the initial_state = state so new changes are tracked
        """
        # TODO(ja): implement hmset in redis-py and use it
        # instead of multiple calls to hset
        if self.is_new_record():
            self["create_time"] = utils.isotime()
        for key, val in self.state.iteritems():
            Redis.instance().hset(self.__redis_key, key, val)
        self.add_to_index()
        self.initial_state = dict(self.state)
        return True

    @absorb_connection_error
    def destroy(self):
        """deletes all related records from datastore."""
        logging.info("Destroying datamodel for %s %s",
                     self.__class__.__name__, self.identifier)
        Redis.instance().delete(self.__redis_key)
        self.remove_from_index()
        return True
@@ -22,8 +22,9 @@ Admin API controller, exposed through http via the api worker.

import base64

from nova import db
from nova import exception
from nova.auth import manager
from nova.compute import model


def user_dict(user, base64_file=None):
@@ -181,7 +182,7 @@ class AdminController(object):
        result = {
            'members': [{'member': m} for m in project.member_ids]}
        return result


    @admin_only
    def modify_project_member(self, context, user, project, operation, **kwargs):
        """Add or remove a user from a project."""
@@ -193,6 +194,8 @@ class AdminController(object):
            raise exception.ApiError('operation must be add or remove')
        return True

    # FIXME(vish): these host commands don't work yet, perhaps some of the
    #              required data can be retrieved from service objects?
    @admin_only
    def describe_hosts(self, _context, **_kwargs):
        """Returns status info for all nodes. Includes:
@@ -203,9 +206,9 @@ class AdminController(object):
        * DHCP servers running
        * Iptables / bridges
        """
        return {'hostSet': [host_dict(h) for h in model.Host.all()]}
        return {'hostSet': [host_dict(h) for h in db.host_get_all()]}

    @admin_only
    def describe_host(self, _context, name, **_kwargs):
        """Returns status info for single node."""
        return host_dict(model.Host.lookup(name))
        return host_dict(db.host_get(name))
@@ -29,22 +29,19 @@ import time

from twisted.internet import defer

from nova import datastore
from nova import db
from nova import exception
from nova import flags
from nova import rpc
from nova import utils
from nova.auth import rbac
from nova.auth import manager
from nova.compute import model
from nova.compute.instance_types import INSTANCE_TYPES
from nova.endpoint import images
from nova.network import service as network_service
from nova.network import model as network_model
from nova.volume import service


FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')


def _gen_key(user_id, key_name):
@@ -63,26 +60,16 @@ class CloudController(object):
    sent to the other nodes.
    """
    def __init__(self):
        self.instdir = model.InstanceDirectory()
        self.network_manager = utils.import_object(FLAGS.network_manager)
        self.setup()

    @property
    def instances(self):
        """ All instances in the system, as dicts """
        return self.instdir.all

    @property
    def volumes(self):
        """ returns a list of all volumes """
        for volume_id in datastore.Redis.instance().smembers("volumes"):
            volume = service.get_volume(volume_id)
            yield volume

    def __str__(self):
        return 'CloudController'

    def setup(self):
        """ Ensure the keychains and folders exist. """
        # FIXME(ja): this should be moved to a nova-manage command,
        #            if not setup throw exceptions instead of running
        # Create keys folder, if it doesn't exist
        if not os.path.exists(FLAGS.keys_path):
            os.makedirs(FLAGS.keys_path)
@@ -91,18 +78,15 @@ class CloudController(object):
        if not os.path.exists(root_ca_path):
            start = os.getcwd()
            os.chdir(FLAGS.ca_path)
            # TODO(vish): Do this with M2Crypto instead
            utils.runthis("Generating root CA: %s", "sh genrootca.sh")
            os.chdir(start)
            # TODO: Do this with M2Crypto instead

    def get_instance_by_ip(self, ip):
        return self.instdir.by_ip(ip)

    def _get_mpi_data(self, project_id):
        result = {}
        for instance in self.instdir.all:
            if instance['project_id'] == project_id:
                line = '%s slots=%d' % (instance['private_dns_name'],
        for instance in db.instance_get_by_project(None, project_id):
            if instance['fixed_ip']:
                line = '%s slots=%d' % (instance['fixed_ip']['str_id'],
                    INSTANCE_TYPES[instance['instance_type']]['vcpus'])
                if instance['key_name'] in result:
                    result[instance['key_name']].append(line)
@@ -110,33 +94,30 @@ class CloudController(object):
                    result[instance['key_name']] = [line]
        return result

    def get_metadata(self, ipaddress):
        i = self.get_instance_by_ip(ipaddress)
        if i is None:
    def get_metadata(self, address):
        instance_ref = db.fixed_ip_get_instance(None, address)
        if instance_ref is None:
            return None
        mpi = self._get_mpi_data(i['project_id'])
        if i['key_name']:
        mpi = self._get_mpi_data(instance_ref['project_id'])
        if instance_ref['key_name']:
            keys = {
                '0': {
                    '_name': i['key_name'],
                    'openssh-key': i['key_data']
                    '_name': instance_ref['key_name'],
                    'openssh-key': instance_ref['key_data']
                }
            }
        else:
            keys = ''

        address_record = network_model.FixedIp(i['private_dns_name'])
        if address_record:
            hostname = address_record['hostname']
        else:
            hostname = 'ip-%s' % i['private_dns_name'].replace('.', '-')
        hostname = instance_ref['hostname']
        floating_ip = db.instance_get_floating_address(None,
                                                       instance_ref['id'])
        data = {
            'user-data': base64.b64decode(i['user_data']),
            'user-data': base64.b64decode(instance_ref['user_data']),
            'meta-data': {
                'ami-id': i['image_id'],
                'ami-launch-index': i['ami_launch_index'],
                'ami-manifest-path': 'FIXME',  # image property
                'block-device-mapping': {  # TODO: replace with real data
                'ami-id': instance_ref['image_id'],
                'ami-launch-index': instance_ref['launch_index'],
                'ami-manifest-path': 'FIXME',
                'block-device-mapping': {  # TODO(vish): replace with real data
                    'ami': 'sda1',
                    'ephemeral0': 'sda2',
                    'root': '/dev/sda1',
@@ -144,27 +125,27 @@ class CloudController(object):
                },
                'hostname': hostname,
                'instance-action': 'none',
                'instance-id': i['instance_id'],
                'instance-type': i.get('instance_type', ''),
                'instance-id': instance_ref['str_id'],
                'instance-type': instance_ref['instance_type'],
                'local-hostname': hostname,
                'local-ipv4': i['private_dns_name'],  # TODO: switch to IP
                'kernel-id': i.get('kernel_id', ''),
                'local-ipv4': address,
                'kernel-id': instance_ref['kernel_id'],
                'placement': {
                    'availaibility-zone': i.get('availability_zone', 'nova'),
                    'availability-zone': 'nova'  # TODO(vish): real zone
                },
                'public-hostname': hostname,
                'public-ipv4': i.get('dns_name', ''),  # TODO: switch to IP
                'public-ipv4': floating_ip or '',
                'public-keys': keys,
                'ramdisk-id': i.get('ramdisk_id', ''),
                'reservation-id': i['reservation_id'],
                'security-groups': i.get('groups', ''),
                'ramdisk-id': instance_ref['ramdisk_id'],
                'reservation-id': instance_ref['reservation_id'],
                'security-groups': '',
                'mpi': mpi
            }
        }
        if False:  # TODO: store ancestor ids
        if False:  # TODO(vish): store ancestor ids
            data['ancestor-ami-ids'] = []
        if i.get('product_codes', None):
            data['product-codes'] = i['product_codes']
        if False:  # TODO(vish): store product codes
            data['product-codes'] = []
        return data

    @rbac.allow('all')
@@ -251,141 +232,114 @@ class CloudController(object):
    @rbac.allow('projectmanager', 'sysadmin')
    def get_console_output(self, context, instance_id, **kwargs):
        # instance_id is passed in as a list of instances
        instance = self._get_instance(context, instance_id[0])
        return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
                        {"method": "get_console_output",
                         "args": {"instance_id": instance_id[0]}})

    def _get_user_id(self, context):
        if context and context.user:
            return context.user.id
        else:
            return None
        instance_ref = db.instance_get_by_str(context, instance_id[0])
        return rpc.call('%s.%s' % (FLAGS.compute_topic,
                                   instance_ref['host']),
                        {"method": "get_console_output",
                         "args": {"context": None,
                                  "instance_id": instance_ref['id']}})

    @rbac.allow('projectmanager', 'sysadmin')
    def describe_volumes(self, context, **kwargs):
        volumes = []
        for volume in self.volumes:
            if context.user.is_admin() or volume['project_id'] == context.project.id:
                v = self.format_volume(context, volume)
                volumes.append(v)
        return defer.succeed({'volumeSet': volumes})
        if context.user.is_admin():
            volumes = db.volume_get_all(context)
        else:
            volumes = db.volume_get_by_project(context, context.project.id)

    def format_volume(self, context, volume):
        volumes = [self._format_volume(context, v) for v in volumes]

        return {'volumeSet': volumes}

    def _format_volume(self, context, volume):
        v = {}
        v['volumeId'] = volume['volume_id']
        v['volumeId'] = volume['str_id']
        v['status'] = volume['status']
        v['size'] = volume['size']
        v['availabilityZone'] = volume['availability_zone']
        v['createTime'] = volume['create_time']
        v['createTime'] = volume['created_at']
        if context.user.is_admin():
            v['status'] = '%s (%s, %s, %s, %s)' % (
                volume.get('status', None),
                volume.get('user_id', None),
                volume.get('node_name', None),
                volume.get('instance_id', ''),
                volume.get('mountpoint', ''))
                volume['status'],
                volume['user_id'],
                volume['host'],
                volume['instance_id'],
                volume['mountpoint'])
        if volume['attach_status'] == 'attached':
            v['attachmentSet'] = [{'attachTime': volume['attach_time'],
                                   'deleteOnTermination': volume['delete_on_termination'],
                                   'deleteOnTermination': False,
                                   'device': volume['mountpoint'],
                                   'instanceId': volume['instance_id'],
                                   'status': 'attached',
                                   'volume_id': volume['volume_id']}]
                                   'volume_id': volume['str_id']}]
        else:
            v['attachmentSet'] = [{}]
        return v

    @rbac.allow('projectmanager', 'sysadmin')
    @defer.inlineCallbacks
    def create_volume(self, context, size, **kwargs):
        # TODO(vish): refactor this to create the volume object here and tell service to create it
        result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume",
                                                     "args": {"size": size,
                                                              "user_id": context.user.id,
                                                              "project_id": context.project.id}})
        # NOTE(vish): rpc returned value is in the result key in the dictionary
        volume = self._get_volume(context, result)
        defer.returnValue({'volumeSet': [self.format_volume(context, volume)]})
        vol = {}
        vol['size'] = size
        vol['user_id'] = context.user.id
        vol['project_id'] = context.project.id
        vol['availability_zone'] = FLAGS.storage_availability_zone
        vol['status'] = "creating"
        vol['attach_status'] = "detached"
        volume_ref = db.volume_create(context, vol)

    def _get_address(self, context, public_ip):
        # FIXME(vish) this should move into network.py
        address = network_model.ElasticIp.lookup(public_ip)
        if address and (context.user.is_admin() or address['project_id'] == context.project.id):
            return address
        raise exception.NotFound("Address at ip %s not found" % public_ip)
        rpc.cast(FLAGS.volume_topic, {"method": "create_volume",
                                      "args": {"context": None,
                                               "volume_id": volume_ref['id']}})

    def _get_image(self, context, image_id):
        """passes in context because
        objectstore does its own authorization"""
        result = images.list(context, [image_id])
        if not result:
            raise exception.NotFound('Image %s could not be found' % image_id)
        image = result[0]
        return image
        return {'volumeSet': [self._format_volume(context, volume_ref)]}

    def _get_instance(self, context, instance_id):
        for instance in self.instdir.all:
            if instance['instance_id'] == instance_id:
                if context.user.is_admin() or instance['project_id'] == context.project.id:
                    return instance
        raise exception.NotFound('Instance %s could not be found' % instance_id)

    def _get_volume(self, context, volume_id):
        volume = service.get_volume(volume_id)
        if context.user.is_admin() or volume['project_id'] == context.project.id:
            return volume
        raise exception.NotFound('Volume %s could not be found' % volume_id)

    @rbac.allow('projectmanager', 'sysadmin')
    def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
        volume = self._get_volume(context, volume_id)
        if volume['status'] == "attached":
        volume_ref = db.volume_get_by_str(context, volume_id)
        # TODO(vish): abstract status checking?
        if volume_ref['attach_status'] == "attached":
            raise exception.ApiError("Volume is already attached")
        # TODO(vish): looping through all volumes is slow. We should probably maintain an index
        for vol in self.volumes:
            if vol['instance_id'] == instance_id and vol['mountpoint'] == device:
                raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint']))
        volume.start_attach(instance_id, device)
        instance = self._get_instance(context, instance_id)
        compute_node = instance['node_name']
        rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
        instance_ref = db.instance_get_by_str(context, instance_id)
        host = instance_ref['host']
        rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "attach_volume",
                  "args": {"volume_id": volume_id,
                           "instance_id": instance_id,
                           "mountpoint": device}})
        return defer.succeed({'attachTime': volume['attach_time'],
                              'device': volume['mountpoint'],
                              'instanceId': instance_id,
                  "args": {"context": None,
                           "volume_id": volume_ref['id'],
                           "instance_id": instance_ref['id'],
                           "mountpoint": device}})
        return defer.succeed({'attachTime': volume_ref['attach_time'],
                              'device': volume_ref['mountpoint'],
                              'instanceId': instance_ref['id'],
                              'requestId': context.request_id,
                              'status': volume['attach_status'],
                              'volumeId': volume_id})
                              'status': volume_ref['attach_status'],
                              'volumeId': volume_ref['id']})

    @rbac.allow('projectmanager', 'sysadmin')
    def detach_volume(self, context, volume_id, **kwargs):
        volume = self._get_volume(context, volume_id)
        instance_id = volume.get('instance_id', None)
        if not instance_id:
        volume_ref = db.volume_get_by_str(context, volume_id)
        instance_ref = db.volume_get_instance(context, volume_ref['id'])
        if not instance_ref:
            raise exception.Error("Volume isn't attached to anything!")
        if volume['status'] == "available":
        # TODO(vish): abstract status checking?
        if volume_ref['status'] == "available":
            raise exception.Error("Volume is already detached")
        try:
            volume.start_detach()
            instance = self._get_instance(context, instance_id)
            rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
            host = instance_ref['host']
            rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                     {"method": "detach_volume",
                      "args": {"instance_id": instance_id,
                               "volume_id": volume_id}})
                      "args": {"context": None,
                               "instance_id": instance_ref['id'],
                               "volume_id": volume_ref['id']}})
        except exception.NotFound:
            # If the instance doesn't exist anymore,
            # then we need to call detach blind
            volume.finish_detach()
        return defer.succeed({'attachTime': volume['attach_time'],
                              'device': volume['mountpoint'],
                              'instanceId': instance_id,
            db.volume_detached(context)
        return defer.succeed({'attachTime': volume_ref['attach_time'],
                              'device': volume_ref['mountpoint'],
                              'instanceId': instance_ref['str_id'],
                              'requestId': context.request_id,
                              'status': volume['attach_status'],
                              'volumeId': volume_id})
                              'status': volume_ref['attach_status'],
                              'volumeId': volume_ref['id']})

    def _convert_to_set(self, lst, label):
        if lst == None or lst == []:
@@ -406,52 +360,55 @@ class CloudController(object):
        assert len(i) == 1
        return i[0]

    def _format_instances(self, context, reservation_id = None):
    def _format_instances(self, context, reservation_id=None):
        reservations = {}
        if context.user.is_admin():
            instgenerator = self.instdir.all
        if reservation_id:
            instances = db.instance_get_by_reservation(context,
                                                       reservation_id)
        else:
            instgenerator = self.instdir.by_project(context.project.id)
        for instance in instgenerator:
            res_id = instance.get('reservation_id', 'Unknown')
            if reservation_id != None and reservation_id != res_id:
                continue
            if not context.user.is_admin():
                instances = db.instance_get_all(context)
            else:
                instances = db.instance_get_by_project(context,
                                                       context.project.id)
        for instance in instances:
            if not context.user.is_admin():
                if instance['image_id'] == FLAGS.vpn_image_id:
                    continue
            i = {}
            i['instance_id'] = instance.get('instance_id', None)
            i['image_id'] = instance.get('image_id', None)
            i['instance_state'] = {
                'code': instance.get('state', 0),
                'name': instance.get('state_description', 'pending')
            i['instanceId'] = instance['str_id']
            i['imageId'] = instance['image_id']
            i['instanceState'] = {
                'code': instance['state'],
                'name': instance['state_description']
            }
            i['public_dns_name'] = network_model.get_public_ip_for_instance(
                                   i['instance_id'])
            i['private_dns_name'] = instance.get('private_dns_name', None)
            if not i['public_dns_name']:
                i['public_dns_name'] = i['private_dns_name']
            i['dns_name'] = instance.get('dns_name', None)
            i['key_name'] = instance.get('key_name', None)
            fixed_addr = None
            floating_addr = None
            if instance['fixed_ip']:
                fixed_addr = instance['fixed_ip']['str_id']
                if instance['fixed_ip']['floating_ips']:
                    fixed = instance['fixed_ip']
                    floating_addr = fixed['floating_ips'][0]['str_id']
            i['privateDnsName'] = fixed_addr
            i['publicDnsName'] = floating_addr
            i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
            i['keyName'] = instance['key_name']
            if context.user.is_admin():
                i['key_name'] = '%s (%s, %s)' % (i['key_name'],
                    instance.get('project_id', None),
                    instance.get('node_name', ''))
            i['product_codes_set'] = self._convert_to_set(
                instance.get('product_codes', None), 'product_code')
            i['instance_type'] = instance.get('instance_type', None)
            i['launch_time'] = instance.get('launch_time', None)
            i['ami_launch_index'] = instance.get('ami_launch_index',
                                                 None)
            if not reservations.has_key(res_id):
                i['keyName'] = '%s (%s, %s)' % (i['keyName'],
                    instance['project_id'],
                    instance['host'])
            i['productCodesSet'] = self._convert_to_set([], 'product_codes')
            i['instanceType'] = instance['instance_type']
            i['launchTime'] = instance['created_at']
            i['amiLaunchIndex'] = instance['launch_index']
            if not reservations.has_key(instance['reservation_id']):
                r = {}
                r['reservation_id'] = res_id
                r['owner_id'] = instance.get('project_id', None)
                r['group_set'] = self._convert_to_set(
                    instance.get('groups', None), 'group_id')
                r['instances_set'] = []
                reservations[res_id] = r
            reservations[res_id]['instances_set'].append(i)
                r['reservationId'] = instance['reservation_id']
                r['ownerId'] = instance['project_id']
                r['groupSet'] = self._convert_to_set([], 'groups')
                r['instancesSet'] = []
                reservations[instance['reservation_id']] = r
            reservations[instance['reservation_id']]['instancesSet'].append(i)

        return list(reservations.values())
@@ -461,20 +418,23 @@ class CloudController(object):

    def format_addresses(self, context):
        addresses = []
        for address in network_model.ElasticIp.all():
            # TODO(vish): implement a by_project iterator for addresses
            if (context.user.is_admin() or
                address['project_id'] == context.project.id):
                address_rv = {
                    'public_ip': address['address'],
                    'instance_id': address.get('instance_id', 'free')
                }
                if context.user.is_admin():
                    address_rv['instance_id'] = "%s (%s, %s)" % (
                        address['instance_id'],
                        address['user_id'],
                        address['project_id'],
                    )
        if context.user.is_admin():
            iterator = db.floating_ip_get_all(context)
        else:
            iterator = db.floating_ip_get_by_project(context,
                                                     context.project.id)
        for floating_ip_ref in iterator:
            address = floating_ip_ref['str_id']
            instance_id = None
            if (floating_ip_ref['fixed_ip']
                    and floating_ip_ref['fixed_ip']['instance']):
                instance_id = floating_ip_ref['fixed_ip']['instance']['str_id']
            address_rv = {'public_ip': address,
                          'instance_id': instance_id}
            if context.user.is_admin():
                details = "%s (%s)" % (address_rv['instance_id'],
                                       floating_ip_ref['project_id'])
                address_rv['instance_id'] = details
            addresses.append(address_rv)
        return {'addressesSet': addresses}

@@ -483,8 +443,8 @@ class CloudController(object):
    def allocate_address(self, context, **kwargs):
        network_topic = yield self._get_network_topic(context)
        public_ip = yield rpc.call(network_topic,
                                   {"method": "allocate_elastic_ip",
                                    "args": {"user_id": context.user.id,
                                   {"method": "allocate_floating_ip",
                                    "args": {"context": None,
                                             "project_id": context.project.id}})
        defer.returnValue({'addressSet': [{'publicIp': public_ip}]})
@@ -492,56 +452,62 @@ class CloudController(object):
    @defer.inlineCallbacks
    def release_address(self, context, public_ip, **kwargs):
        # NOTE(vish): Should we make sure this works?
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = yield self._get_network_topic(context)
        rpc.cast(network_topic,
                 {"method": "deallocate_elastic_ip",
                  "args": {"elastic_ip": public_ip}})
                 {"method": "deallocate_floating_ip",
                  "args": {"context": None,
                           "floating_address": floating_ip_ref['str_id']}})
        defer.returnValue({'releaseResponse': ["Address released."]})

    @rbac.allow('netadmin')
    @defer.inlineCallbacks
    def associate_address(self, context, instance_id, public_ip, **kwargs):
        instance = self._get_instance(context, instance_id)
        address = self._get_address(context, public_ip)
        instance_ref = db.instance_get_by_str(context, instance_id)
        fixed_ip_ref = db.fixed_ip_get_by_instance(context, instance_ref['id'])
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = yield self._get_network_topic(context)
        rpc.cast(network_topic,
                 {"method": "associate_elastic_ip",
                  "args": {"elastic_ip": address['address'],
                           "fixed_ip": instance['private_dns_name'],
                           "instance_id": instance['instance_id']}})
                 {"method": "associate_floating_ip",
                  "args": {"context": None,
                           "floating_address": floating_ip_ref['str_id'],
                           "fixed_address": fixed_ip_ref['str_id']}})
        defer.returnValue({'associateResponse': ["Address associated."]})

    @rbac.allow('netadmin')
    @defer.inlineCallbacks
    def disassociate_address(self, context, public_ip, **kwargs):
        address = self._get_address(context, public_ip)
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = yield self._get_network_topic(context)
        rpc.cast(network_topic,
                 {"method": "disassociate_elastic_ip",
                  "args": {"elastic_ip": address['address']}})
                 {"method": "disassociate_floating_ip",
                  "args": {"context": None,
                           "floating_address": floating_ip_ref['str_id']}})
        defer.returnValue({'disassociateResponse': ["Address disassociated."]})

    @defer.inlineCallbacks
    def _get_network_topic(self, context):
        """Retrieves the network host for a project"""
        host = network_service.get_host_for_project(context.project.id)
        network_ref = db.project_get_network(context, context.project.id)
        host = network_ref['host']
        if not host:
            host = yield rpc.call(FLAGS.network_topic,
                                  {"method": "set_network_host",
                                   "args": {"user_id": context.user.id,
                                            "project_id": context.project.id}})
        defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
                                  {"method": "set_network_host",
                                   "args": {"context": None,
                                            "project_id": context.project.id}})
        defer.returnValue(db.queue_get_for(context, FLAGS.network_topic, host))

    @rbac.allow('projectmanager', 'sysadmin')
    @defer.inlineCallbacks
    def run_instances(self, context, **kwargs):
        # make sure user can access the image
        # vpn image is private so it doesn't show up on lists
        if kwargs['image_id'] != FLAGS.vpn_image_id:
            image = self._get_image(context, kwargs['image_id'])
        vpn = kwargs['image_id'] == FLAGS.vpn_image_id

        # FIXME(ja): if image is cloudpipe, this breaks
        if not vpn:
            image = images.get(context, kwargs['image_id'])

        # FIXME(ja): if image is vpn, this breaks
        # get defaults from imagestore
        image_id = image['imageId']
        kernel_id = image.get('kernelId', FLAGS.default_kernel)
@@ -552,11 +518,10 @@ class CloudController(object):
        ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)

        # make sure we have access to kernel and ramdisk
        self._get_image(context, kernel_id)
        self._get_image(context, ramdisk_id)
        images.get(context, kernel_id)
        images.get(context, ramdisk_id)

        logging.debug("Going to run instances...")
        reservation_id = utils.generate_uid('r')
        launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
        key_data = None
        if kwargs.has_key('key_name'):
@@ -565,107 +530,122 @@ class CloudController(object):
                raise exception.ApiError('Key Pair %s not found' %
                                         kwargs['key_name'])
            key_data = key_pair.public_key
        network_topic = yield self._get_network_topic(context)

        # TODO: Get the real security group of launch in here
        security_group = "default"
        for num in range(int(kwargs['max_count'])):
            is_vpn = False
            if image_id == FLAGS.vpn_image_id:
                is_vpn = True
            inst = self.instdir.new()
            allocate_data = yield rpc.call(network_topic,
                {"method": "allocate_fixed_ip",
                 "args": {"user_id": context.user.id,
                          "project_id": context.project.id,
                          "security_group": security_group,
                          "is_vpn": is_vpn,
                          "hostname": inst.instance_id}})
            inst['image_id'] = image_id
            inst['kernel_id'] = kernel_id
            inst['ramdisk_id'] = ramdisk_id
            inst['user_data'] = kwargs.get('user_data', '')
            inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
            inst['reservation_id'] = reservation_id
            inst['launch_time'] = launch_time
            inst['key_data'] = key_data or ''
            inst['key_name'] = kwargs.get('key_name', '')
            inst['user_id'] = context.user.id
            inst['project_id'] = context.project.id
            inst['ami_launch_index'] = num
            inst['security_group'] = security_group
            inst['hostname'] = inst.instance_id
            for (key, value) in allocate_data.iteritems():
                inst[key] = value

            inst.save()
        reservation_id = utils.generate_uid('r')
        base_options = {}
        base_options['image_id'] = image_id
        base_options['kernel_id'] = kernel_id
        base_options['ramdisk_id'] = ramdisk_id
        base_options['reservation_id'] = reservation_id
        base_options['key_data'] = key_data
        base_options['key_name'] = kwargs.get('key_name', None)
        base_options['user_id'] = context.user.id
        base_options['project_id'] = context.project.id
        base_options['user_data'] = kwargs.get('user_data', '')
        base_options['instance_type'] = kwargs.get('instance_type', 'm1.small')
        base_options['security_group'] = security_group

        for num in range(int(kwargs['max_count'])):
            instance_ref = db.instance_create(context, base_options)
            inst_id = instance_ref['id']

            inst = {}
            inst['mac_address'] = utils.generate_mac()
            inst['launch_index'] = num
            inst['hostname'] = instance_ref['str_id']
            db.instance_update(context, inst_id, inst)
            address = self.network_manager.allocate_fixed_ip(context,
                                                             inst_id,
                                                             vpn)

            # TODO(vish): This probably should be done in the scheduler
            #             network is setup when host is assigned
            network_topic = yield self._get_network_topic(context)
            rpc.call(network_topic,
                     {"method": "setup_fixed_ip",
                      "args": {"context": None,
                               "address": address}})

            rpc.cast(FLAGS.compute_topic,
                     {"method": "run_instance",
                      "args": {"instance_id": inst.instance_id}})
            logging.debug("Casting to node for %s's instance with IP of %s" %
                          (context.user.name, inst['private_dns_name']))
        # TODO: Make Network figure out the network name from ip.
        defer.returnValue(self._format_run_instances(context, reservation_id))
                      "args": {"context": None,
                               "instance_id": inst_id}})
            logging.debug("Casting to node for %s/%s's instance %s" %
                          (context.project.name, context.user.name, inst_id))
        defer.returnValue(self._format_run_instances(context,
                                                     reservation_id))
    @rbac.allow('projectmanager', 'sysadmin')
    @defer.inlineCallbacks
    def terminate_instances(self, context, instance_id, **kwargs):
        logging.debug("Going to start terminating instances")
        network_topic = yield self._get_network_topic(context)
        for i in instance_id:
            logging.debug("Going to try and terminate %s" % i)
        for id_str in instance_id:
            logging.debug("Going to try and terminate %s" % id_str)
            try:
                instance = self._get_instance(context, i)
                instance_ref = db.instance_get_by_str(context, id_str)
            except exception.NotFound:
                logging.warning("Instance %s was not found during terminate"
                                % i)
                                % id_str)
                continue
            elastic_ip = network_model.get_public_ip_for_instance(i)
            if elastic_ip:
                logging.debug("Disassociating address %s" % elastic_ip)

            # FIXME(ja): where should network deallocate occur?
            address = db.instance_get_floating_address(context,
                                                       instance_ref['id'])
            if address:
                logging.debug("Disassociating address %s" % address)
                # NOTE(vish): Right now we don't really care if the ip is
                #             disassociated. We may need to worry about
                #             checking this later. Perhaps in the scheduler?
                network_topic = yield self._get_network_topic(context)
                rpc.cast(network_topic,
                         {"method": "disassociate_elastic_ip",
                          "args": {"elastic_ip": elastic_ip}})
                         {"method": "disassociate_floating_ip",
                          "args": {"context": None,
                                   "address": address}})

            fixed_ip = instance.get('private_dns_name', None)
            if fixed_ip:
                logging.debug("Deallocating address %s" % fixed_ip)
                # NOTE(vish): Right now we don't really care if the ip is
                #             actually removed. We may need to worry about
                #             checking this later. Perhaps in the scheduler?
                rpc.cast(network_topic,
                         {"method": "deallocate_fixed_ip",
                          "args": {"fixed_ip": fixed_ip}})
            address = db.instance_get_fixed_address(context,
                                                    instance_ref['id'])
            if address:
                logging.debug("Deallocating address %s" % address)
                # NOTE(vish): Currently, nothing needs to be done on the
                #             network node until release. If this changes,
                #             we will need to cast here.
                self.network.deallocate_fixed_ip(context, address)

            if instance.get('node_name', 'unassigned') != 'unassigned':
                # NOTE(joshua?): It's also internal default
                rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
            host = instance_ref['host']
            if host:
                rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                         {"method": "terminate_instance",
                          "args": {"instance_id": i}})
                          "args": {"context": None,
                                   "instance_id": instance_ref['id']}})
            else:
                instance.destroy()
                db.instance_destroy(context, instance_ref['id'])
        defer.returnValue(True)
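The terminate path above also shows the new host-addressed routing: instead of formatting '%s.%s' queue names by hand, callers ask db.queue_get_for() for the queue of a specific host. A hedged sketch of the pattern; cast_to_host is a hypothetical wrapper, and the queue name is assumed to resolve to "<topic>.<host>":

    def cast_to_host(context, topic, host, method, **kwargs):
        """Send an rpc cast to the worker running on one specific host."""
        kwargs['context'] = None  # context still does not serialize over rabbit
        rpc.cast(db.queue_get_for(context, topic, host),
                 {"method": method,
                  "args": kwargs})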
    @rbac.allow('projectmanager', 'sysadmin')
    def reboot_instances(self, context, instance_id, **kwargs):
        """instance_id is a list of instance ids"""
        for i in instance_id:
            instance = self._get_instance(context, i)
            rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
                     {"method": "reboot_instance",
                      "args": {"instance_id": i}})
        for id_str in instance_id:
            instance_ref = db.instance_get_by_str(context, id_str)
            host = instance_ref['host']
            rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                     {"method": "reboot_instance",
                      "args": {"context": None,
                               "instance_id": instance_ref['id']}})
        return defer.succeed(True)
    @rbac.allow('projectmanager', 'sysadmin')
    def delete_volume(self, context, volume_id, **kwargs):
        # TODO: return error if not authorized
        volume = self._get_volume(context, volume_id)
        volume_node = volume['node_name']
        rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node),
        volume_ref = db.volume_get_by_str(context, volume_id)
        host = volume_ref['host']
        rpc.cast(db.queue_get_for(context, FLAGS.volume_topic, host),
                 {"method": "delete_volume",
                  "args": {"volume_id": volume_id}})
                  "args": {"context": None,
                           "volume_id": volume_ref['id']}})
        return defer.succeed(True)
    @rbac.allow('all')
@@ -716,23 +696,3 @@ class CloudController(object):
            raise exception.ApiError('operation_type must be add or remove')
        result = images.modify(context, image_id, operation_type)
        return defer.succeed(result)

    def update_state(self, topic, value):
        """ accepts status reports from the queue and consolidates them """
        # TODO(jmc): if an instance has disappeared from
        # the node, call instance_death
        if topic == "instances":
            return defer.succeed(True)
        aggregate_state = getattr(self, topic)
        node_name = value.keys()[0]
        items = value[node_name]

        logging.debug("Updating %s state for %s" % (topic, node_name))

        for item_id in items.keys():
            if (aggregate_state.has_key('pending') and
                aggregate_state['pending'].has_key(item_id)):
                del aggregate_state['pending'][item_id]
        aggregate_state[node_name] = items

        return defer.succeed(True)
@@ -18,7 +18,7 @@

"""
Proxy AMI-related calls from the cloud controller, to the running
objectstore daemon.
objectstore service.
"""

import json
@@ -26,6 +26,7 @@ import urllib

import boto.s3.connection

from nova import exception
from nova import flags
from nova import utils
from nova.auth import manager
@@ -55,7 +56,6 @@ def register(context, image_location):

    return image_id


def list(context, filter_list=[]):
    """ return a list of all images that a user can see

@@ -71,6 +71,14 @@ def list(context, filter_list=[]):
        return [i for i in result if i['imageId'] in filter_list]
    return result

def get(context, image_id):
    """return an image object if the context has permissions"""
    result = list(context, [image_id])
    if not result:
        raise exception.NotFound('Image %s could not be found' % image_id)
    image = result[0]
    return image


def deregister(context, image_id):
    """ unregister an image """
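The new images.get() above means callers no longer have to filter list() results themselves. Hypothetical usage (the image id is invented for illustration):

    try:
        image = get(context, 'ami-example')
    except exception.NotFound:
        raise exception.ApiError('image not found')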
@@ -22,6 +22,7 @@ where they're used.
"""

import getopt
import os
import socket
import sys

@@ -34,7 +35,7 @@ class FlagValues(gflags.FlagValues):
    Unknown flags will be ignored when parsing the command line, but the
    command line will be kept so that it can be replayed if new flags are
    defined after the initial parsing.

    """

    def __init__(self):
@@ -50,7 +51,7 @@ class FlagValues(gflags.FlagValues):
        # leftover args at the end
        sneaky_unparsed_args = {"value": None}
        original_argv = list(argv)

        if self.IsGnuGetOpt():
            orig_getopt = getattr(getopt, 'gnu_getopt')
            orig_name = 'gnu_getopt'
@@ -74,14 +75,14 @@ class FlagValues(gflags.FlagValues):
            unparsed_args = sneaky_unparsed_args['value']
            if unparsed_args:
                if self.IsGnuGetOpt():
                    args = argv[:1] + unparsed
                    args = argv[:1] + unparsed_args
                else:
                    args = argv[:1] + original_argv[-len(unparsed_args):]
            else:
                args = argv[:1]
        finally:
            setattr(getopt, orig_name, orig_getopt)

        # Store the arguments for later, we'll need them for new flags
        # added at runtime
        self.__dict__['__stored_argv'] = original_argv
@@ -92,7 +93,7 @@ class FlagValues(gflags.FlagValues):
    def SetDirty(self, name):
        """Mark a flag as dirty so that accessing it will cause a reparse."""
        self.__dict__['__dirty'].append(name)

    def IsDirty(self, name):
        return name in self.__dict__['__dirty']

@@ -113,12 +114,12 @@ class FlagValues(gflags.FlagValues):
        for k in self.__dict__['__dirty']:
            setattr(self, k, getattr(new_flags, k))
        self.ClearDirty()

    def __setitem__(self, name, flag):
        gflags.FlagValues.__setitem__(self, name, flag)
        if self.WasAlreadyParsed():
            self.SetDirty(name)

    def __getitem__(self, name):
        if self.IsDirty(name):
            self.ParseNewFlags()
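Assumed semantics of the dirty-flag machinery above: a flag DEFINEd after the command line has already been parsed is marked dirty, and the first access reparses the stored argv so the late flag still picks up any value given on the command line. A sketch under that assumption:

    from nova import flags

    FLAGS = flags.FLAGS
    FLAGS(['prog', '--late_flag=from_cli'])          # initial parse
    flags.DEFINE_string('late_flag', 'default', '')  # defined afterwards
    print FLAGS.late_flag                            # reparse; prints 'from_cli'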
@@ -202,9 +203,20 @@ DEFINE_string('vpn_key_suffix',

DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')

DEFINE_string('sql_connection',
              'sqlite:///%s/nova.sqlite' % os.path.abspath("./"),
              'connection string for sql database')

DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
              'Manager for compute')
DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
              'Manager for network')
DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
              'Manager for volume')

DEFINE_string('host', socket.gethostname(),
              'name of this node')

# UNUSED
DEFINE_string('node_availability_zone', 'nova',
              'availability zone of this node')
DEFINE_string('node_name', socket.gethostname(),
              'name of this node')
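These *_manager flags are the heart of the pluggable-backend idea: each service imports its manager by dotted path instead of hard-coding a class. The unit tests later in this diff load managers the same way; a minimal sketch:

    from nova import flags
    from nova import utils

    FLAGS = flags.FLAGS
    # swap implementations by changing the flag, not the code
    network_manager = utils.import_object(FLAGS.network_manager)
    volume_manager = utils.import_object(FLAGS.volume_manager)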
@@ -18,9 +18,10 @@
# under the License.

"""
Process pool, still buggy right now.
Process pool using twisted threading
"""

import logging
import StringIO

from twisted.internet import defer
@@ -29,30 +30,14 @@ from twisted.internet import protocol
from twisted.internet import reactor

from nova import flags
from nova.utils import ProcessExecutionError

FLAGS = flags.FLAGS
flags.DEFINE_integer('process_pool_size', 4,
                     'Number of processes to use in the process pool')


# NOTE(termie): this is copied from twisted.internet.utils but since
#               they don't export it I've copied and modified
class UnexpectedErrorOutput(IOError):
    """
    Standard error data was received where it was not expected. This is a
    subclass of L{IOError} to preserve backward compatibility with the previous
    error behavior of L{getProcessOutput}.

    @ivar processEnded: A L{Deferred} which will fire when the process which
        produced the data on stderr has ended (exited and all file descriptors
        closed).
    """
    def __init__(self, stdout=None, stderr=None):
        IOError.__init__(self, "got stdout: %r\nstderr: %r" % (stdout, stderr))


# This is based on _BackRelay from twisted.internet.utils, but modified to
# capture both stdout and stderr, without odd stderr handling, and also to
# This is based on _BackRelay from twisted.internet.utils, but modified to
# capture both stdout and stderr, without odd stderr handling, and also to
# handle stdin
class BackRelayWithInput(protocol.ProcessProtocol):
    """
@@ -62,22 +47,23 @@ class BackRelayWithInput(protocol.ProcessProtocol):
    @ivar deferred: A L{Deferred} which will be called back with all of stdout
        and all of stderr as well (as a tuple). C{terminate_on_stderr} is true
        and any bytes are received over stderr, this will fire with an
        L{_UnexpectedErrorOutput} instance and the attribute will be set to
        L{_ProcessExecutionError} instance and the attribute will be set to
        C{None}.

    @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are
        received over stderr, this attribute will refer to a L{Deferred} which
        will be called back when the process ends. This C{Deferred} is also
        associated with the L{_UnexpectedErrorOutput} which C{deferred} fires
        with earlier in this case so that users can determine when the process
    @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are
        received over stderr, this attribute will refer to a L{Deferred} which
        will be called back when the process ends. This C{Deferred} is also
        associated with the L{_ProcessExecutionError} which C{deferred} fires
        with earlier in this case so that users can determine when the process
        has actually ended, in addition to knowing when bytes have been received
        via stderr.
    """

    def __init__(self, deferred, started_deferred=None,
                 terminate_on_stderr=False, check_exit_code=True,
                 process_input=None):
    def __init__(self, deferred, cmd, started_deferred=None,
                 terminate_on_stderr=False, check_exit_code=True,
                 process_input=None):
        self.deferred = deferred
        self.cmd = cmd
        self.stdout = StringIO.StringIO()
        self.stderr = StringIO.StringIO()
        self.started_deferred = started_deferred
@@ -85,14 +71,18 @@ class BackRelayWithInput(protocol.ProcessProtocol):
        self.check_exit_code = check_exit_code
        self.process_input = process_input
        self.on_process_ended = None

    def _build_execution_error(self, exit_code=None):
        return ProcessExecutionError(cmd=self.cmd,
                                     exit_code=exit_code,
                                     stdout=self.stdout.getvalue(),
                                     stderr=self.stderr.getvalue())

    def errReceived(self, text):
        self.stderr.write(text)
        if self.terminate_on_stderr and (self.deferred is not None):
            self.on_process_ended = defer.Deferred()
            self.deferred.errback(UnexpectedErrorOutput(
                stdout=self.stdout.getvalue(),
                stderr=self.stderr.getvalue()))
            self.deferred.errback(self._build_execution_error())
            self.deferred = None
            self.transport.loseConnection()
@@ -102,15 +92,19 @@ class BackRelayWithInput(protocol.ProcessProtocol):
    def processEnded(self, reason):
        if self.deferred is not None:
            stdout, stderr = self.stdout.getvalue(), self.stderr.getvalue()
            try:
                if self.check_exit_code:
                    reason.trap(error.ProcessDone)
                self.deferred.callback((stdout, stderr))
            except:
                # NOTE(justinsb): This logic is a little suspicious to me...
                # If the callback throws an exception, then errback will be
                # called also. However, this is what the unit tests test for...
                self.deferred.errback(UnexpectedErrorOutput(stdout, stderr))
            exit_code = reason.value.exitCode
            if self.check_exit_code and exit_code <> 0:
                self.deferred.errback(self._build_execution_error(exit_code))
            else:
                try:
                    if self.check_exit_code:
                        reason.trap(error.ProcessDone)
                    self.deferred.callback((stdout, stderr))
                except:
                    # NOTE(justinsb): This logic is a little suspicious to me...
                    # If the callback throws an exception, then errback will be
                    # called also. However, this is what the unit tests test for...
                    self.deferred.errback(self._build_execution_error(exit_code))
        elif self.on_process_ended is not None:
            self.on_process_ended.errback(reason)

@@ -122,8 +116,8 @@ class BackRelayWithInput(protocol.ProcessProtocol):
            self.transport.write(self.process_input)
            self.transport.closeStdin()

def get_process_output(executable, args=None, env=None, path=None,
                       process_reactor=None, check_exit_code=True,
def get_process_output(executable, args=None, env=None, path=None,
                       process_reactor=None, check_exit_code=True,
                       process_input=None, started_deferred=None,
                       terminate_on_stderr=False):
    if process_reactor is None:
@@ -131,10 +125,15 @@ def get_process_output(executable, args=None, env=None, path=None,
    args = args and args or ()
    env = env and env and {}
    deferred = defer.Deferred()
    cmd = executable
    if args:
        cmd = cmd + " " + ' '.join(args)
    logging.debug("Running cmd: %s", cmd)
    process_handler = BackRelayWithInput(
        deferred,
        started_deferred=started_deferred,
        check_exit_code=check_exit_code,
        deferred,
        cmd,
        started_deferred=started_deferred,
        check_exit_code=check_exit_code,
        process_input=process_input,
        terminate_on_stderr=terminate_on_stderr)
    # NOTE(vish): commands come in as unicode, but self.executes needs
@@ -142,7 +141,7 @@ def get_process_output(executable, args=None, env=None, path=None,
    executable = str(executable)
    if not args is None:
        args = [str(x) for x in args]
    process_reactor.spawnProcess( process_handler, executable,
    process_reactor.spawnProcess( process_handler, executable,
                                 (executable,)+tuple(args), env, path)
    return deferred
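A hedged example of what the refactored error path above gives callers: a failed command now errbacks with a ProcessExecutionError that carries the command line, exit code, and captured output, instead of a bare UnexpectedErrorOutput. Attribute names below are assumed to mirror the constructor keywords in this diff:

    d = get_process_output('/bin/false', check_exit_code=True)

    def _on_error(failure):
        err = failure.value  # ProcessExecutionError
        logging.error("%s failed with %s", err.cmd, err.exit_code)

    d.addErrback(_on_error)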
@@ -60,7 +60,7 @@ def stop(pidfile):
        sys.stderr.write(message % pidfile)
        return  # not an error in a restart

    # Try killing the daemon process
    # Try killing the daemon process
    try:
        while 1:
            os.kill(pid, signal.SIGTERM)

@@ -33,8 +33,6 @@ class Context(object):
class AccessTestCase(test.BaseTestCase):
    def setUp(self):
        super(AccessTestCase, self).setUp()
        FLAGS.connection_type = 'fake'
        FLAGS.fake_storage = True
        um = manager.AuthManager()
        # Make test users
        try:
@@ -32,11 +32,9 @@ FLAGS = flags.FLAGS


class AuthTestCase(test.BaseTestCase):
    flush_db = False
    def setUp(self):
        super(AuthTestCase, self).setUp()
        self.flags(connection_type='fake',
                   fake_storage=True)
        self.flags(connection_type='fake')
        self.manager = manager.AuthManager()

    def test_001_can_create_users(self):

@@ -27,9 +27,9 @@ from xml.etree import ElementTree
from nova import flags
from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import power_state
from nova.compute import service
from nova.endpoint import api
from nova.endpoint import cloud

@@ -40,8 +40,7 @@ FLAGS = flags.FLAGS
class CloudTestCase(test.BaseTestCase):
    def setUp(self):
        super(CloudTestCase, self).setUp()
        self.flags(connection_type='fake',
                   fake_storage=True)
        self.flags(connection_type='fake')

        self.conn = rpc.Connection.instance()
        logging.getLogger().setLevel(logging.DEBUG)
@@ -50,7 +49,7 @@ class CloudTestCase(test.BaseTestCase):
        self.cloud = cloud.CloudController()

        # set up a service
        self.compute = service.ComputeService()
        self.compute = utils.import_class(FLAGS.compute_manager)
        self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
                                                    topic=FLAGS.compute_topic,
                                                    proxy=self.compute)
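Note the two loaders the tests use: the cloud test above takes utils.import_class while the compute test below takes utils.import_object. The presumed distinction, worth confirming in nova/utils.py, is that one returns the class itself and the other an instance:

    cls = utils.import_class('nova.compute.manager.ComputeManager')   # the class
    obj = utils.import_object('nova.compute.manager.ComputeManager')  # an instance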
@@ -15,113 +15,115 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Compute
"""

import datetime
import logging
import time
from twisted.internet import defer
from xml.etree import ElementTree

from twisted.internet import defer

from nova import db
from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.compute import model
from nova.compute import service
from nova.auth import manager


FLAGS = flags.FLAGS


class InstanceXmlTestCase(test.TrialTestCase):
    # @defer.inlineCallbacks
    def test_serialization(self):
        # TODO: Reimplement this, it doesn't make sense in redis-land
        return

        # instance_id = 'foo'
        # first_node = node.Node()
        # inst = yield first_node.run_instance(instance_id)
        #
        # # force the state so that we can verify that it changes
        # inst._s['state'] = node.Instance.NOSTATE
        # xml = inst.toXml()
        # self.assert_(ElementTree.parse(StringIO.StringIO(xml)))
        #
        # second_node = node.Node()
        # new_inst = node.Instance.fromXml(second_node._conn, pool=second_node._pool, xml=xml)
        # self.assertEqual(new_inst.state, node.Instance.RUNNING)
        # rv = yield first_node.terminate_instance(instance_id)


class ComputeConnectionTestCase(test.TrialTestCase):
    def setUp(self):
class ComputeTestCase(test.TrialTestCase):
    """Test case for compute"""
    def setUp(self):  # pylint: disable-msg=C0103
        logging.getLogger().setLevel(logging.DEBUG)
        super(ComputeConnectionTestCase, self).setUp()
        self.flags(connection_type='fake',
                   fake_storage=True)
        self.compute = service.ComputeService()
        super(ComputeTestCase, self).setUp()
        self.flags(connection_type='fake')
        self.compute = utils.import_object(FLAGS.compute_manager)
        self.manager = manager.AuthManager()
        self.user = self.manager.create_user('fake', 'fake', 'fake')
        self.project = self.manager.create_project('fake', 'fake', 'fake')
        self.context = None

    def create_instance(self):
        instdir = model.InstanceDirectory()
        inst = instdir.new()
        # TODO(ja): add ami, ari, aki, user_data
    def tearDown(self):  # pylint: disable-msg=C0103
        self.manager.delete_user(self.user)
        self.manager.delete_project(self.project)

    def _create_instance(self):
        """Create a test instance"""
        inst = {}
        inst['image_id'] = 'ami-test'
        inst['reservation_id'] = 'r-fakeres'
        inst['launch_time'] = '10'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['user_id'] = self.user.id
        inst['project_id'] = self.project.id
        inst['instance_type'] = 'm1.tiny'
        inst['node_name'] = FLAGS.node_name
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        inst.save()
        return inst['instance_id']
        return db.instance_create(self.context, inst)['id']

    @defer.inlineCallbacks
    def test_run_describe_terminate(self):
        instance_id = self.create_instance()
    def test_run_terminate(self):
        """Make sure it is possible to run and terminate instance"""
        instance_id = self._create_instance()

        rv = yield self.compute.run_instance(instance_id)
        yield self.compute.run_instance(self.context, instance_id)

        rv = yield self.compute.describe_instances()
        logging.info("Running instances: %s", rv)
        self.assertEqual(rv[instance_id].name, instance_id)
        instances = db.instance_get_all(None)
        logging.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        rv = yield self.compute.terminate_instance(instance_id)
        yield self.compute.terminate_instance(self.context, instance_id)

        rv = yield self.compute.describe_instances()
        logging.info("After terminating instances: %s", rv)
        self.assertEqual(rv, {})
        instances = db.instance_get_all(None)
        logging.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)

    @defer.inlineCallbacks
    def test_run_terminate_timestamps(self):
        """Make sure it is possible to run and terminate instance"""
        instance_id = self._create_instance()
        instance_ref = db.instance_get(self.context, instance_id)
        self.assertEqual(instance_ref['launched_at'], None)
        self.assertEqual(instance_ref['terminated_at'], None)
        launch = datetime.datetime.utcnow()
        yield self.compute.run_instance(self.context, instance_id)
        instance_ref = db.instance_get(self.context, instance_id)
        self.assert_(instance_ref['launched_at'] > launch)
        self.assertEqual(instance_ref['terminated_at'], None)
        terminate = datetime.datetime.utcnow()
        yield self.compute.terminate_instance(self.context, instance_id)
        instance_ref = db.instance_get({'deleted': True}, instance_id)
        self.assert_(instance_ref['launched_at'] < terminate)
        self.assert_(instance_ref['terminated_at'] > terminate)

    @defer.inlineCallbacks
    def test_reboot(self):
        instance_id = self.create_instance()
        rv = yield self.compute.run_instance(instance_id)

        rv = yield self.compute.describe_instances()
        self.assertEqual(rv[instance_id].name, instance_id)

        yield self.compute.reboot_instance(instance_id)

        rv = yield self.compute.describe_instances()
        self.assertEqual(rv[instance_id].name, instance_id)
        rv = yield self.compute.terminate_instance(instance_id)
        """Ensure instance can be rebooted"""
        instance_id = self._create_instance()
        yield self.compute.run_instance(self.context, instance_id)
        yield self.compute.reboot_instance(self.context, instance_id)
        yield self.compute.terminate_instance(self.context, instance_id)

    @defer.inlineCallbacks
    def test_console_output(self):
        instance_id = self.create_instance()
        rv = yield self.compute.run_instance(instance_id)
        """Make sure we can get console output from instance"""
        instance_id = self._create_instance()
        yield self.compute.run_instance(self.context, instance_id)

        console = yield self.compute.get_console_output(instance_id)
        console = yield self.compute.get_console_output(self.context,
                                                        instance_id)
        self.assert_(console)
        rv = yield self.compute.terminate_instance(instance_id)
        yield self.compute.terminate_instance(self.context, instance_id)

    @defer.inlineCallbacks
    def test_run_instance_existing(self):
        instance_id = self.create_instance()
        rv = yield self.compute.run_instance(instance_id)

        rv = yield self.compute.describe_instances()
        self.assertEqual(rv[instance_id].name, instance_id)

        self.assertRaises(exception.Error, self.compute.run_instance, instance_id)
        rv = yield self.compute.terminate_instance(instance_id)
        """Ensure failure when running an instance that already exists"""
        instance_id = self._create_instance()
        yield self.compute.run_instance(self.context, instance_id)
        self.assertFailure(self.compute.run_instance(self.context,
                                                     instance_id),
                           exception.Error)
        yield self.compute.terminate_instance(self.context, instance_id)
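A small testing note on the hunk above: with Twisted Trial, assertRaises cannot catch an exception raised inside a Deferred, so the rewritten test uses assertFailure, which takes the deferred itself plus the expected exception type and returns a deferred the test framework waits on:

    d = self.compute.run_instance(self.context, instance_id)
    self.assertFailure(d, exception.Error)  # passes iff d errbacks with Error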
@@ -20,9 +20,20 @@ from nova import flags

FLAGS = flags.FLAGS

flags.DECLARE('volume_driver', 'nova.volume.manager')
FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
FLAGS.connection_type = 'fake'
FLAGS.fake_storage = True
FLAGS.fake_rabbit = True
FLAGS.fake_network = True
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS.network_size = 16
FLAGS.num_networks = 5
FLAGS.fake_network = True
flags.DECLARE('num_shelves', 'nova.volume.manager')
flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
FLAGS.num_shelves = 2
FLAGS.blades_per_shelf = 4
FLAGS.verbose = True
FLAGS.sql_connection = 'sqlite:///nova.sqlite'
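The DECLARE calls above are the counterpart to the dirty-flag machinery in flags.py: they pull a flag's definition out of the module that DEFINEs it, so the test configuration can override the value up front. Assumed usage pattern:

    from nova import flags

    flags.DECLARE('num_shelves', 'nova.volume.manager')  # import the definition
    flags.FLAGS.num_shelves = 2                          # then override it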
@@ -1,292 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime, timedelta
import logging
import time

from nova import flags
from nova import test
from nova import utils
from nova.compute import model


FLAGS = flags.FLAGS


class ModelTestCase(test.TrialTestCase):
    def setUp(self):
        super(ModelTestCase, self).setUp()
        self.flags(connection_type='fake',
                   fake_storage=True)

    def tearDown(self):
        model.Instance('i-test').destroy()
        model.Host('testhost').destroy()
        model.Daemon('testhost', 'nova-testdaemon').destroy()

    def create_instance(self):
        inst = model.Instance('i-test')
        inst['reservation_id'] = 'r-test'
        inst['launch_time'] = '10'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type'] = 'm1.tiny'
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        inst['private_dns_name'] = '10.0.0.1'
        inst.save()
        return inst

    def create_host(self):
        host = model.Host('testhost')
        host.save()
        return host

    def create_daemon(self):
        daemon = model.Daemon('testhost', 'nova-testdaemon')
        daemon.save()
        return daemon

    def create_session_token(self):
        session_token = model.SessionToken('tk12341234')
        session_token['user'] = 'testuser'
        session_token.save()
        return session_token

    def test_create_instance(self):
        """store with create_instance, then test that a load finds it"""
        instance = self.create_instance()
        old = model.Instance(instance.identifier)
        self.assertFalse(old.is_new_record())

    def test_delete_instance(self):
        """create, then destroy, then make sure loads a new record"""
        instance = self.create_instance()
        instance.destroy()
        newinst = model.Instance('i-test')
        self.assertTrue(newinst.is_new_record())

    def test_instance_added_to_set(self):
        """create, then check that it is listed in global set"""
        instance = self.create_instance()
        found = False
        for x in model.InstanceDirectory().all:
            if x.identifier == 'i-test':
                found = True
        self.assert_(found)

    def test_instance_associates_project(self):
        """create, then check that it is listed for the project"""
        instance = self.create_instance()
        found = False
        for x in model.InstanceDirectory().by_project(instance.project):
            if x.identifier == 'i-test':
                found = True
        self.assert_(found)

    def test_instance_associates_ip(self):
        """create, then check that it is listed for the ip"""
        instance = self.create_instance()
        found = False
        x = model.InstanceDirectory().by_ip(instance['private_dns_name'])
        self.assertEqual(x.identifier, 'i-test')

    def test_instance_associates_node(self):
        """create, then check that it is listed for the node_name"""
        instance = self.create_instance()
        found = False
        for x in model.InstanceDirectory().by_node(FLAGS.node_name):
            if x.identifier == 'i-test':
                found = True
        self.assertFalse(found)
        instance['node_name'] = 'test_node'
        instance.save()
        for x in model.InstanceDirectory().by_node('test_node'):
            if x.identifier == 'i-test':
                found = True
        self.assert_(found)


    def test_host_class_finds_hosts(self):
        host = self.create_host()
        self.assertEqual('testhost', model.Host.lookup('testhost').identifier)

    def test_host_class_doesnt_find_missing_hosts(self):
        rv = model.Host.lookup('woahnelly')
        self.assertEqual(None, rv)

    def test_create_host(self):
        """store with create_host, then test that a load finds it"""
        host = self.create_host()
        old = model.Host(host.identifier)
        self.assertFalse(old.is_new_record())

    def test_delete_host(self):
        """create, then destroy, then make sure loads a new record"""
        instance = self.create_host()
        instance.destroy()
        newinst = model.Host('testhost')
        self.assertTrue(newinst.is_new_record())

    def test_host_added_to_set(self):
        """create, then check that it is included in list"""
        instance = self.create_host()
        found = False
        for x in model.Host.all():
            if x.identifier == 'testhost':
                found = True
        self.assert_(found)

    def test_create_daemon_two_args(self):
        """create a daemon with two arguments"""
        d = self.create_daemon()
        d = model.Daemon('testhost', 'nova-testdaemon')
        self.assertFalse(d.is_new_record())

    def test_create_daemon_single_arg(self):
        """Create a daemon using the combined host:bin format"""
        d = model.Daemon("testhost:nova-testdaemon")
        d.save()
        d = model.Daemon('testhost:nova-testdaemon')
        self.assertFalse(d.is_new_record())

    def test_equality_of_daemon_single_and_double_args(self):
        """Create a daemon using the combined host:bin arg, find with 2"""
        d = model.Daemon("testhost:nova-testdaemon")
        d.save()
        d = model.Daemon('testhost', 'nova-testdaemon')
        self.assertFalse(d.is_new_record())

    def test_equality_daemon_of_double_and_single_args(self):
        """Create a daemon using the combined host:bin arg, find with 2"""
        d = self.create_daemon()
        d = model.Daemon('testhost:nova-testdaemon')
        self.assertFalse(d.is_new_record())

    def test_delete_daemon(self):
        """create, then destroy, then make sure loads a new record"""
        instance = self.create_daemon()
        instance.destroy()
        newinst = model.Daemon('testhost', 'nova-testdaemon')
        self.assertTrue(newinst.is_new_record())

    def test_daemon_heartbeat(self):
        """Create a daemon, sleep, heartbeat, check for update"""
        d = self.create_daemon()
        ts = d['updated_at']
        time.sleep(2)
        d.heartbeat()
        d2 = model.Daemon('testhost', 'nova-testdaemon')
        ts2 = d2['updated_at']
        self.assert_(ts2 > ts)

    def test_daemon_added_to_set(self):
        """create, then check that it is included in list"""
        instance = self.create_daemon()
        found = False
        for x in model.Daemon.all():
            if x.identifier == 'testhost:nova-testdaemon':
                found = True
        self.assert_(found)

    def test_daemon_associates_host(self):
        """create, then check that it is listed for the host"""
        instance = self.create_daemon()
        found = False
        for x in model.Daemon.by_host('testhost'):
            if x.identifier == 'testhost:nova-testdaemon':
                found = True
        self.assertTrue(found)

    def test_create_session_token(self):
        """create"""
        d = self.create_session_token()
        d = model.SessionToken(d.token)
        self.assertFalse(d.is_new_record())

    def test_delete_session_token(self):
        """create, then destroy, then make sure loads a new record"""
        instance = self.create_session_token()
        instance.destroy()
        newinst = model.SessionToken(instance.token)
        self.assertTrue(newinst.is_new_record())

    def test_session_token_added_to_set(self):
        """create, then check that it is included in list"""
        instance = self.create_session_token()
        found = False
        for x in model.SessionToken.all():
            if x.identifier == instance.token:
                found = True
        self.assert_(found)

    def test_session_token_associates_user(self):
        """create, then check that it is listed for the user"""
        instance = self.create_session_token()
        found = False
        for x in model.SessionToken.associated_to('user', 'testuser'):
            if x.identifier == instance.identifier:
                found = True
        self.assertTrue(found)

    def test_session_token_generation(self):
        instance = model.SessionToken.generate('username', 'TokenType')
        self.assertFalse(instance.is_new_record())

    def test_find_generated_session_token(self):
        instance = model.SessionToken.generate('username', 'TokenType')
        found = model.SessionToken.lookup(instance.identifier)
        self.assert_(found)

    def test_update_session_token_expiry(self):
        instance = model.SessionToken('tk12341234')
        oldtime = datetime.utcnow()
        instance['expiry'] = oldtime.strftime(utils.TIME_FORMAT)
        instance.update_expiry()
        expiry = utils.parse_isotime(instance['expiry'])
        self.assert_(expiry > datetime.utcnow())

    def test_session_token_lookup_when_expired(self):
        instance = model.SessionToken.generate("testuser")
        instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
        instance.save()
        inst = model.SessionToken.lookup(instance.identifier)
        self.assertFalse(inst)

    def test_session_token_lookup_when_not_expired(self):
        instance = model.SessionToken.generate("testuser")
        inst = model.SessionToken.lookup(instance.identifier)
        self.assert_(inst)

    def test_session_token_is_expired_when_expired(self):
        instance = model.SessionToken.generate("testuser")
        instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
        self.assert_(instance.is_expired())

    def test_session_token_is_expired_when_not_expired(self):
        instance = model.SessionToken.generate("testuser")
        self.assertFalse(instance.is_expired())

    def test_session_token_ttl(self):
        instance = model.SessionToken.generate("testuser")
        now = datetime.utcnow()
        delta = timedelta(hours=1)
        instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)
        # give 5 seconds of fuzziness
        self.assert_(abs(instance.ttl() - FLAGS.auth_token_ttl) < 5)
@@ -22,14 +22,13 @@ import IPy
import os
import logging

from nova import db
from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.network import model
from nova.network import service
from nova.network import vpn
from nova.network.exception import NoMoreAddresses
from nova.endpoint import api

FLAGS = flags.FLAGS
@@ -41,183 +40,180 @@ class NetworkTestCase(test.TrialTestCase):
        # NOTE(vish): if you change these flags, make sure to change the
        #             flags in the corresponding section in nova-dhcpbridge
        self.flags(connection_type='fake',
                   fake_storage=True,
                   fake_network=True,
                   auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
                   network_size=32)
                   network_size=16,
                   num_networks=5)
        logging.getLogger().setLevel(logging.DEBUG)
        self.manager = manager.AuthManager()
        self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
        self.projects = []
        self.projects.append(self.manager.create_project('netuser',
                                                         'netuser',
                                                         'netuser'))
        for i in range(0, 6):
        self.network = utils.import_object(FLAGS.network_manager)
        self.context = api.APIRequestContext(None, project=None, user=self.user)
        for i in range(5):
            name = 'project%s' % i
            self.projects.append(self.manager.create_project(name,
                                                             'netuser',
                                                             name))
            vpn.NetworkData.create(self.projects[i].id)
        self.service = service.VlanNetworkService()
            # create the necessary network data for the project
            self.network.set_network_host(self.context, self.projects[i].id)
        instance_ref = db.instance_create(None,
                                          {'mac_address': utils.generate_mac()})
        self.instance_id = instance_ref['id']
        instance_ref = db.instance_create(None,
                                          {'mac_address': utils.generate_mac()})
        self.instance2_id = instance_ref['id']

    def tearDown(self):  # pylint: disable-msg=C0103
        super(NetworkTestCase, self).tearDown()
        # TODO(termie): this should really be instantiating clean datastores
        #               in between runs, one failure kills all the tests
        db.instance_destroy(None, self.instance_id)
        db.instance_destroy(None, self.instance2_id)
        for project in self.projects:
            self.manager.delete_project(project)
        self.manager.delete_user(self.user)

    def test_public_network_allocation(self):
    def _create_address(self, project_num, instance_id=None):
        """Create an address in given project num"""
        if instance_id is None:
            instance_id = self.instance_id
        self.context.project = self.projects[project_num]
        return self.network.allocate_fixed_ip(self.context, instance_id)

    def test_public_network_association(self):
        """Makes sure that we can allocate a public ip"""
        # TODO(vish): better way of adding floating ips
        pubnet = IPy.IP(flags.FLAGS.public_range)
        address = self.service.allocate_elastic_ip(self.user.id,
                                                   self.projects[0].id)
        self.assertTrue(IPy.IP(address) in pubnet)
        address = str(pubnet[0])
        try:
            db.floating_ip_get_by_address(None, address)
        except exception.NotFound:
            db.floating_ip_create(None, {'address': address,
                                         'host': FLAGS.host})
        float_addr = self.network.allocate_floating_ip(self.context,
                                                       self.projects[0].id)
        fix_addr = self._create_address(0)
        self.assertEqual(float_addr, str(pubnet[0]))
        self.network.associate_floating_ip(self.context, float_addr, fix_addr)
        address = db.instance_get_floating_address(None, self.instance_id)
        self.assertEqual(address, float_addr)
        self.network.disassociate_floating_ip(self.context, float_addr)
        address = db.instance_get_floating_address(None, self.instance_id)
        self.assertEqual(address, None)
        self.network.deallocate_floating_ip(self.context, float_addr)
        self.network.deallocate_fixed_ip(self.context, fix_addr)

    def test_allocate_deallocate_fixed_ip(self):
        """Makes sure that we can allocate and deallocate a fixed ip"""
        result = self.service.allocate_fixed_ip(
            self.user.id, self.projects[0].id)
        address = result['private_dns_name']
        mac = result['mac_address']
        net = model.get_project_network(self.projects[0].id, "default")
        self.assertEqual(True, is_in_project(address, self.projects[0].id))
        hostname = "test-host"
        issue_ip(mac, address, hostname, net.bridge_name)
        self.service.deallocate_fixed_ip(address)
        address = self._create_address(0)
        self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
        lease_ip(address)
        self.network.deallocate_fixed_ip(self.context, address)

        # Doesn't go away until it's dhcp released
        self.assertEqual(True, is_in_project(address, self.projects[0].id))
        self.assertTrue(is_allocated_in_project(address, self.projects[0].id))

        release_ip(mac, address, hostname, net.bridge_name)
        self.assertEqual(False, is_in_project(address, self.projects[0].id))
        release_ip(address)
        self.assertFalse(is_allocated_in_project(address, self.projects[0].id))

    def test_side_effects(self):
        """Ensures allocating and releasing has no side effects"""
        hostname = "side-effect-host"
        result = self.service.allocate_fixed_ip(self.user.id,
                                                self.projects[0].id)
        mac = result['mac_address']
        address = result['private_dns_name']
        result = self.service.allocate_fixed_ip(self.user,
                                                self.projects[1].id)
        secondmac = result['mac_address']
        secondaddress = result['private_dns_name']
        address = self._create_address(0)
        address2 = self._create_address(1, self.instance2_id)

        net = model.get_project_network(self.projects[0].id, "default")
        secondnet = model.get_project_network(self.projects[1].id, "default")

        self.assertEqual(True, is_in_project(address, self.projects[0].id))
        self.assertEqual(True, is_in_project(secondaddress,
                                             self.projects[1].id))
        self.assertEqual(False, is_in_project(address, self.projects[1].id))
        self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
        self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
        self.assertFalse(is_allocated_in_project(address, self.projects[1].id))

        # Addresses are allocated before they're issued
        issue_ip(mac, address, hostname, net.bridge_name)
        issue_ip(secondmac, secondaddress, hostname, secondnet.bridge_name)
        lease_ip(address)
        lease_ip(address2)

        self.service.deallocate_fixed_ip(address)
        release_ip(mac, address, hostname, net.bridge_name)
        self.assertEqual(False, is_in_project(address, self.projects[0].id))
        self.network.deallocate_fixed_ip(self.context, address)
        release_ip(address)
        self.assertFalse(is_allocated_in_project(address, self.projects[0].id))

        # First address release shouldn't affect the second
        self.assertEqual(True, is_in_project(secondaddress,
                                             self.projects[1].id))
        self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))

        self.service.deallocate_fixed_ip(secondaddress)
        release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name)
        self.assertEqual(False, is_in_project(secondaddress,
                                              self.projects[1].id))
        self.network.deallocate_fixed_ip(self.context, address2)
        release_ip(address2)
        self.assertFalse(is_allocated_in_project(address2,
                                                 self.projects[1].id))

    def test_subnet_edge(self):
        """Makes sure that private ips don't overlap"""
        result = self.service.allocate_fixed_ip(self.user.id,
                                                self.projects[0].id)
        firstaddress = result['private_dns_name']
        hostname = "toomany-hosts"
        first = self._create_address(0)
        lease_ip(first)
        instance_ids = []
        for i in range(1, 5):
            project_id = self.projects[i].id
            result = self.service.allocate_fixed_ip(
                self.user, project_id)
            mac = result['mac_address']
            address = result['private_dns_name']
            result = self.service.allocate_fixed_ip(
                self.user, project_id)
            mac2 = result['mac_address']
            address2 = result['private_dns_name']
            result = self.service.allocate_fixed_ip(
                self.user, project_id)
            mac3 = result['mac_address']
            address3 = result['private_dns_name']
            net = model.get_project_network(project_id, "default")
            issue_ip(mac, address, hostname, net.bridge_name)
            issue_ip(mac2, address2, hostname, net.bridge_name)
            issue_ip(mac3, address3, hostname, net.bridge_name)
            self.assertEqual(False, is_in_project(address,
                                                  self.projects[0].id))
            self.assertEqual(False, is_in_project(address2,
                                                  self.projects[0].id))
            self.assertEqual(False, is_in_project(address3,
                                                  self.projects[0].id))
            self.service.deallocate_fixed_ip(address)
            self.service.deallocate_fixed_ip(address2)
            self.service.deallocate_fixed_ip(address3)
            release_ip(mac, address, hostname, net.bridge_name)
            release_ip(mac2, address2, hostname, net.bridge_name)
            release_ip(mac3, address3, hostname, net.bridge_name)
        net = model.get_project_network(self.projects[0].id, "default")
        self.service.deallocate_fixed_ip(firstaddress)
            mac = utils.generate_mac()
            instance_ref = db.instance_create(None,
                                              {'mac_address': mac})
            instance_ids.append(instance_ref['id'])
            address = self._create_address(i, instance_ref['id'])
            mac = utils.generate_mac()
            instance_ref = db.instance_create(None,
                                              {'mac_address': mac})
            instance_ids.append(instance_ref['id'])
            address2 = self._create_address(i, instance_ref['id'])
            mac = utils.generate_mac()
            instance_ref = db.instance_create(None,
                                              {'mac_address': mac})
            instance_ids.append(instance_ref['id'])
            address3 = self._create_address(i, instance_ref['id'])
            lease_ip(address)
            lease_ip(address2)
            lease_ip(address3)
            self.assertFalse(is_allocated_in_project(address,
                                                     self.projects[0].id))
            self.assertFalse(is_allocated_in_project(address2,
                                                     self.projects[0].id))
            self.assertFalse(is_allocated_in_project(address3,
                                                     self.projects[0].id))
            self.network.deallocate_fixed_ip(self.context, address)
            self.network.deallocate_fixed_ip(self.context, address2)
            self.network.deallocate_fixed_ip(self.context, address3)
            release_ip(address)
            release_ip(address2)
            release_ip(address3)
        for instance_id in instance_ids:
            db.instance_destroy(None, instance_id)
        release_ip(first)
        self.network.deallocate_fixed_ip(self.context, first)
    def test_vpn_ip_and_port_looks_valid(self):
        """Ensure the vpn ip and port are reasonable"""
        self.assert_(self.projects[0].vpn_ip)
        self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port)
        self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port)
        self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
        self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
                                                  FLAGS.num_networks)

    def test_too_many_vpns(self):
        """Ensure error is raised if we run out of vpn ports"""
        vpns = []
        for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
            vpns.append(vpn.NetworkData.create("vpnuser%s" % i))
        self.assertRaises(vpn.NoMorePorts, vpn.NetworkData.create, "boom")
        for network_datum in vpns:
            network_datum.destroy()
    def test_too_many_networks(self):
        """Ensure error is raised if we run out of networks"""
        projects = []
        networks_left = FLAGS.num_networks - db.network_count(None)
        for i in range(networks_left):
            project = self.manager.create_project('many%s' % i, self.user)
            projects.append(project)
        self.assertRaises(db.NoMoreNetworks,
                          self.manager.create_project,
                          'boom',
                          self.user)
        for project in projects:
            self.manager.delete_project(project)

    def test_ips_are_reused(self):
        """Makes sure that ip addresses that are deallocated get reused"""
        net = model.get_project_network(self.projects[0].id, "default")
        address = self._create_address(0)
        lease_ip(address)
        self.network.deallocate_fixed_ip(self.context, address)
        release_ip(address)

        hostname = "reuse-host"
        macs = {}
        addresses = {}
        num_available_ips = net.num_available_ips
        for i in range(num_available_ips - 1):
            result = self.service.allocate_fixed_ip(self.user.id,
                                                    self.projects[0].id)
            macs[i] = result['mac_address']
            addresses[i] = result['private_dns_name']
            issue_ip(macs[i], addresses[i], hostname, net.bridge_name)

        result = self.service.allocate_fixed_ip(self.user.id,
                                                self.projects[0].id)
        mac = result['mac_address']
        address = result['private_dns_name']

        issue_ip(mac, address, hostname, net.bridge_name)
        self.service.deallocate_fixed_ip(address)
        release_ip(mac, address, hostname, net.bridge_name)

        result = self.service.allocate_fixed_ip(
            self.user, self.projects[0].id)
        secondmac = result['mac_address']
        secondaddress = result['private_dns_name']
        self.assertEqual(address, secondaddress)
        issue_ip(secondmac, secondaddress, hostname, net.bridge_name)
        self.service.deallocate_fixed_ip(secondaddress)
        release_ip(secondmac, secondaddress, hostname, net.bridge_name)

        for i in range(len(addresses)):
            self.service.deallocate_fixed_ip(addresses[i])
            release_ip(macs[i], addresses[i], hostname, net.bridge_name)
        address2 = self._create_address(0)
        self.assertEqual(address, address2)
        self.network.deallocate_fixed_ip(self.context, address2)

    def test_available_ips(self):
        """Make sure the number of available ips for the network is correct
@@ -230,43 +226,53 @@ class NetworkTestCase(test.TrialTestCase):
        There are ips reserved at the bottom and top of the range.
        services (network, gateway, CloudPipe, broadcast)
        """
        net = model.get_project_network(self.projects[0].id, "default")
        num_preallocated_ips = len(net.assigned)
        network = db.project_get_network(None, self.projects[0].id)
        net_size = flags.FLAGS.network_size
        num_available_ips = net_size - (net.num_bottom_reserved_ips +
                                        num_preallocated_ips +
                                        net.num_top_reserved_ips)
        self.assertEqual(num_available_ips, net.num_available_ips)
        total_ips = (db.network_count_available_ips(None, network['id']) +
                     db.network_count_reserved_ips(None, network['id']) +
                     db.network_count_allocated_ips(None, network['id']))
        self.assertEqual(total_ips, net_size)

    def test_too_many_addresses(self):
        """Test for a NoMoreAddresses exception when all fixed ips are used.
        """
        net = model.get_project_network(self.projects[0].id, "default")

        hostname = "toomany-hosts"
        macs = {}
        addresses = {}
        num_available_ips = net.num_available_ips
        network = db.project_get_network(None, self.projects[0].id)
        num_available_ips = db.network_count_available_ips(None,
                                                           network['id'])
        addresses = []
        instance_ids = []
        for i in range(num_available_ips):
            result = self.service.allocate_fixed_ip(self.user.id,
                                                    self.projects[0].id)
            macs[i] = result['mac_address']
            addresses[i] = result['private_dns_name']
            issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
            mac = utils.generate_mac()
            instance_ref = db.instance_create(None,
                                              {'mac_address': mac})
            instance_ids.append(instance_ref['id'])
            address = self._create_address(0, instance_ref['id'])
            addresses.append(address)
            lease_ip(address)

        self.assertEqual(net.num_available_ips, 0)
        self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip,
                          self.user.id, self.projects[0].id)
        self.assertEqual(db.network_count_available_ips(None,
                                                        network['id']), 0)
        self.assertRaises(db.NoMoreAddresses,
                          self.network.allocate_fixed_ip,
                          self.context,
                          'foo')

        for i in range(len(addresses)):
            self.service.deallocate_fixed_ip(addresses[i])
            release_ip(macs[i], addresses[i], hostname, net.bridge_name)
        self.assertEqual(net.num_available_ips, num_available_ips)
        for i in range(num_available_ips):
            self.network.deallocate_fixed_ip(self.context, addresses[i])
            release_ip(addresses[i])
            db.instance_destroy(None, instance_ids[i])
        self.assertEqual(db.network_count_available_ips(None,
                                                        network['id']),
                         num_available_ips)

def is_in_project(address, project_id):
def is_allocated_in_project(address, project_id):
    """Returns true if address is in specified project"""
    return address in model.get_project_network(project_id).assigned
    project_net = db.project_get_network(None, project_id)
    network = db.fixed_ip_get_network(None, address)
    instance = db.fixed_ip_get_instance(None, address)
    # instance exists until release
    return instance is not None and network['id'] == project_net['id']


def binpath(script):
@@ -274,22 +280,28 @@ def binpath(script):
    return os.path.abspath(os.path.join(__file__, "../../../bin", script))


def issue_ip(mac, private_ip, hostname, interface):
def lease_ip(private_ip):
    """Run add command on dhcpbridge"""
    cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'),
                               mac, private_ip, hostname)
    env = {'DNSMASQ_INTERFACE': interface,
    network_ref = db.fixed_ip_get_network(None, private_ip)
    instance_ref = db.fixed_ip_get_instance(None, private_ip)
    cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
                                 instance_ref['mac_address'],
                                 private_ip)
    env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
           'TESTING': '1',
           'FLAGFILE': FLAGS.dhcpbridge_flagfile}
    (out, err) = utils.execute(cmd, addl_env=env)
    logging.debug("ISSUE_IP: %s, %s ", out, err)


def release_ip(mac, private_ip, hostname, interface):
def release_ip(private_ip):
    """Run del command on dhcpbridge"""
    cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'),
                               mac, private_ip, hostname)
    env = {'DNSMASQ_INTERFACE': interface,
    network_ref = db.fixed_ip_get_network(None, private_ip)
    instance_ref = db.fixed_ip_get_instance(None, private_ip)
    cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
                                 instance_ref['mac_address'],
                                 private_ip)
    env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
           'TESTING': '1',
           'FLAGFILE': FLAGS.dhcpbridge_flagfile}
    (out, err) = utils.execute(cmd, addl_env=env)
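The lease_ip/release_ip helpers above stand in for dnsmasq: they invoke the nova-dhcpbridge script with the same add/del commands dnsmasq would issue on a real network host. Typical flow inside a test, taken from test_allocate_deallocate_fixed_ip above:

    address = self._create_address(0)                         # allocate
    lease_ip(address)                                         # simulate dhcp lease
    self.network.deallocate_fixed_ip(self.context, address)   # mark deallocated
    release_ip(address)                                       # simulate dhcp release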
@@ -21,7 +21,6 @@ from nova import flags
FLAGS = flags.FLAGS

FLAGS.connection_type = 'libvirt'
FLAGS.fake_storage = False
FLAGS.fake_rabbit = False
FLAGS.fake_network = False
FLAGS.verbose = False
nova/tests/service_unittest.py (new file, 182 lines)
@@ -0,0 +1,182 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Unit Tests for remote procedure calls using queue
"""

import mox

from nova import exception
from nova import flags
from nova import rpc
from nova import test
from nova import service
from nova import manager

FLAGS = flags.FLAGS
flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager",
                    "Manager for testing")


class FakeManager(manager.Manager):
    """Fake manager for tests"""
    pass
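
(The fake_manager flag above holds a dotted class path; elsewhere in this
branch, utils.import_object(FLAGS.volume_manager) turns such a path into a
live object. A minimal sketch of what a loader like that could look like; an
assumption for illustration, not the actual nova.utils code:)

    def import_object(import_str):
        """Import a dotted path like 'pkg.mod.Klass' and return an instance."""
        module_name, _sep, class_name = import_str.rpartition('.')
        # a non-empty fromlist makes __import__ return the leaf module
        module = __import__(module_name, fromlist=[class_name])
        return getattr(module, class_name)()

    # e.g. manager = import_object(FLAGS.fake_manager)  # -> FakeManager()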


class ServiceTestCase(test.BaseTestCase):
    """Test cases for rpc"""

    def setUp(self):  # pylint: disable=C0103
        super(ServiceTestCase, self).setUp()
        self.mox.StubOutWithMock(service, 'db')

    def test_create(self):
        host = 'foo'
        binary = 'nova-fake'
        topic = 'fake'
        self.mox.StubOutWithMock(rpc,
                                 'AdapterConsumer',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(
                service.task, 'LoopingCall', use_mock_anything=True)
        rpc.AdapterConsumer(connection=mox.IgnoreArg(),
                            topic=topic,
                            proxy=mox.IsA(service.Service)).AndReturn(
                                    rpc.AdapterConsumer)

        rpc.AdapterConsumer(connection=mox.IgnoreArg(),
                            topic='%s.%s' % (topic, host),
                            proxy=mox.IsA(service.Service)).AndReturn(
                                    rpc.AdapterConsumer)

        # Stub out looping call a bit needlessly since we don't have an easy
        # way to cancel it (yet) when the test finishes
        service.task.LoopingCall(mox.IgnoreArg()).AndReturn(
                service.task.LoopingCall)
        service.task.LoopingCall.start(interval=mox.IgnoreArg(),
                                       now=mox.IgnoreArg())

        rpc.AdapterConsumer.attach_to_twisted()
        rpc.AdapterConsumer.attach_to_twisted()
        service_create = {'host': host,
                          'binary': binary,
                          'topic': topic,
                          'report_count': 0}
        service_ref = {'host': host,
                       'binary': binary,
                       'report_count': 0,
                       'id': 1}

        service.db.service_get_by_args(None,
                                       host,
                                       binary).AndRaise(exception.NotFound())
        service.db.service_create(None,
                                  service_create).AndReturn(service_ref)
        self.mox.ReplayAll()

        app = service.Service.create(host=host, binary=binary)
        self.assert_(app)
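
(test_create leans on mox's record/replay cycle: stub the collaborator,
record the expected calls, ReplayAll, then exercise the code. A toy,
self-contained example of the same cycle, unrelated to nova:)

    import mox

    class Greeter(object):
        def greet(self, name):
            return 'hello %s' % name

    m = mox.Mox()
    greeter = m.CreateMock(Greeter)
    greeter.greet('world').AndReturn('hi world')   # record the expectation
    m.ReplayAll()                                  # switch to replay mode
    assert greeter.greet('world') == 'hi world'    # exercise the mock
    m.VerifyAll()                                  # every expectation was met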

    # We're testing sort of weird behavior in how report_state decides
    # whether it is disconnected: it looks for a variable on itself called
    # 'model_disconnected', and report_state doesn't really do much, so
    # these are mostly just for coverage
    def test_report_state(self):
        host = 'foo'
        binary = 'bar'
        service_ref = {'host': host,
                       'binary': binary,
                       'report_count': 0,
                       'id': 1}
        service.db.__getattr__('report_state')
        service.db.service_get_by_args(None,
                                       host,
                                       binary).AndReturn(service_ref)
        service.db.service_update(None, service_ref['id'],
                                  mox.ContainsKeyValue('report_count', 1))

        self.mox.ReplayAll()
        s = service.Service()
        rv = yield s.report_state(host, binary)

    def test_report_state_no_service(self):
        host = 'foo'
        binary = 'bar'
        service_create = {'host': host,
                          'binary': binary,
                          'report_count': 0}
        service_ref = {'host': host,
                       'binary': binary,
                       'report_count': 0,
                       'id': 1}

        service.db.__getattr__('report_state')
        service.db.service_get_by_args(None,
                                       host,
                                       binary).AndRaise(exception.NotFound())
        service.db.service_create(None,
                                  service_create).AndReturn(service_ref)
        service.db.service_get(None, service_ref['id']).AndReturn(service_ref)
        service.db.service_update(None, service_ref['id'],
                                  mox.ContainsKeyValue('report_count', 1))

        self.mox.ReplayAll()
        s = service.Service()
        rv = yield s.report_state(host, binary)

    def test_report_state_newly_disconnected(self):
        host = 'foo'
        binary = 'bar'
        service_ref = {'host': host,
                       'binary': binary,
                       'report_count': 0,
                       'id': 1}

        service.db.__getattr__('report_state')
        service.db.service_get_by_args(None,
                                       host,
                                       binary).AndRaise(Exception())

        self.mox.ReplayAll()
        s = service.Service()
        rv = yield s.report_state(host, binary)

        self.assert_(s.model_disconnected)

    def test_report_state_newly_connected(self):
        host = 'foo'
        binary = 'bar'
        service_ref = {'host': host,
                       'binary': binary,
                       'report_count': 0,
                       'id': 1}

        service.db.__getattr__('report_state')
        service.db.service_get_by_args(None,
                                       host,
                                       binary).AndReturn(service_ref)
        service.db.service_update(None, service_ref['id'],
                                  mox.ContainsKeyValue('report_count', 1))

        self.mox.ReplayAll()
        s = service.Service()
        s.model_disconnected = True
        rv = yield s.report_state(host, binary)

        self.assert_(not s.model_disconnected)
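
(Taken together, the four tests pin down report_state's contract: bump
report_count on success, create the service row if it is missing, set
model_disconnected when the db errors out, and clear it on recovery. A rough
reconstruction consistent with those expectations; the shipped
nova.service.Service code may differ. It assumes the module-level db and
exception imports shown in this file:)

    def report_state(self, host, binary, context=None):
        try:
            try:
                service_ref = db.service_get_by_args(context, host, binary)
            except exception.NotFound:
                created = db.service_create(context, {'host': host,
                                                      'binary': binary,
                                                      'report_count': 0})
                service_ref = db.service_get(context, created['id'])
            db.service_update(context, service_ref['id'],
                              {'report_count': service_ref['report_count'] + 1})
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False   # newly reconnected
        except Exception:  # pylint: disable=W0703
            self.model_disconnected = True        # newly disconnected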

nova/tests/storage_unittest.py (deleted file, 115 lines)
@@ -1,115 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from nova import exception
from nova import flags
from nova import test
from nova.compute import node
from nova.volume import storage


FLAGS = flags.FLAGS


class StorageTestCase(test.TrialTestCase):
    def setUp(self):
        logging.getLogger().setLevel(logging.DEBUG)
        super(StorageTestCase, self).setUp()
        self.mynode = node.Node()
        self.mystorage = None
        self.flags(connection_type='fake',
                   fake_storage=True)
        self.mystorage = storage.BlockStore()

    def test_run_create_volume(self):
        vol_size = '0'
        user_id = 'fake'
        project_id = 'fake'
        volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
        # TODO(termie): get_volume returns differently than create_volume
        self.assertEqual(volume_id,
                         storage.get_volume(volume_id)['volume_id'])

        rv = self.mystorage.delete_volume(volume_id)
        self.assertRaises(exception.Error,
                          storage.get_volume,
                          volume_id)

    def test_too_big_volume(self):
        vol_size = '1001'
        user_id = 'fake'
        project_id = 'fake'
        self.assertRaises(TypeError,
                          self.mystorage.create_volume,
                          vol_size, user_id, project_id)

    def test_too_many_volumes(self):
        vol_size = '1'
        user_id = 'fake'
        project_id = 'fake'
        num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
        total_slots = FLAGS.slots_per_shelf * num_shelves
        vols = []
        for i in xrange(total_slots):
            vid = self.mystorage.create_volume(vol_size, user_id, project_id)
            vols.append(vid)
        self.assertRaises(storage.NoMoreVolumes,
                          self.mystorage.create_volume,
                          vol_size, user_id, project_id)
        for id in vols:
            self.mystorage.delete_volume(id)

    def test_run_attach_detach_volume(self):
        # Create one volume and one node to test with
        instance_id = "storage-test"
        vol_size = "5"
        user_id = "fake"
        project_id = 'fake'
        mountpoint = "/dev/sdf"
        volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)

        volume_obj = storage.get_volume(volume_id)
        volume_obj.start_attach(instance_id, mountpoint)
        rv = yield self.mynode.attach_volume(volume_id,
                                             instance_id,
                                             mountpoint)
        self.assertEqual(volume_obj['status'], "in-use")
        self.assertEqual(volume_obj['attachStatus'], "attached")
        self.assertEqual(volume_obj['instance_id'], instance_id)
        self.assertEqual(volume_obj['mountpoint'], mountpoint)

        self.assertRaises(exception.Error,
                          self.mystorage.delete_volume,
                          volume_id)

        rv = yield self.mystorage.detach_volume(volume_id)
        volume_obj = storage.get_volume(volume_id)
        self.assertEqual(volume_obj['status'], "available")

        rv = self.mystorage.delete_volume(volume_id)
        self.assertRaises(exception.Error,
                          storage.get_volume,
                          volume_id)

    def test_multi_node(self):
        # TODO(termie): Figure out how to test with two nodes,
        # each of them having a different FLAG for storage_node
        # This will allow us to test cross-node interactions
        pass


@@ -15,149 +15,159 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Tests for Volume Code
"""
import logging
import shutil
import tempfile

from twisted.internet import defer

from nova import compute
from nova import exception
from nova import db
from nova import flags
from nova import test
from nova.volume import service as volume_service

from nova import utils

FLAGS = flags.FLAGS


class VolumeTestCase(test.TrialTestCase):
    def setUp(self):
    """Test Case for volumes"""
    def setUp(self):  # pylint: disable-msg=C0103
        logging.getLogger().setLevel(logging.DEBUG)
        super(VolumeTestCase, self).setUp()
        self.compute = compute.service.ComputeService()
        self.volume = None
        self.tempdir = tempfile.mkdtemp()
        self.flags(connection_type='fake',
                   fake_storage=True,
                   aoe_export_dir=self.tempdir)
        self.volume = volume_service.VolumeService()
        self.compute = utils.import_object(FLAGS.compute_manager)
        self.flags(connection_type='fake')
        self.volume = utils.import_object(FLAGS.volume_manager)
        self.context = None

    def tearDown(self):
        shutil.rmtree(self.tempdir)
    @staticmethod
    def _create_volume(size='0'):
        """Create a volume object"""
        vol = {}
        vol['size'] = size
        vol['user_id'] = 'fake'
        vol['project_id'] = 'fake'
        vol['availability_zone'] = FLAGS.storage_availability_zone
        vol['status'] = "creating"
        vol['attach_status'] = "detached"
        return db.volume_create(None, vol)['id']
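
(Pattern used throughout the rewritten tests below: stage a row through this
helper, then hand its id to the manager. A hypothetical fragment from inside
a test method, for illustration:)

    volume_id = self._create_volume('10')  # stage a record with size '10'
    yield self.volume.create_volume(self.context, volume_id)  # drive the manager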

    @defer.inlineCallbacks
    def test_run_create_volume(self):
        vol_size = '0'
        user_id = 'fake'
        project_id = 'fake'
        volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
        # TODO(termie): get_volume returns differently than create_volume
        self.assertEqual(volume_id,
                         volume_service.get_volume(volume_id)['volume_id'])
    def test_create_delete_volume(self):
        """Test volume can be created and deleted"""
        volume_id = self._create_volume()
        yield self.volume.create_volume(self.context, volume_id)
        self.assertEqual(volume_id, db.volume_get(None, volume_id).id)

        rv = self.volume.delete_volume(volume_id)
        self.assertRaises(exception.Error, volume_service.get_volume, volume_id)
        yield self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.NotFound,
                          db.volume_get,
                          None,
                          volume_id)

    @defer.inlineCallbacks
    def test_too_big_volume(self):
        vol_size = '1001'
        user_id = 'fake'
        project_id = 'fake'
        """Ensure failure if a too large of a volume is requested"""
        # FIXME(vish): validation needs to move into the data layer in
        # volume_create
        defer.returnValue(True)
        try:
            yield self.volume.create_volume(vol_size, user_id, project_id)
            volume_id = self._create_volume('1001')
            yield self.volume.create_volume(self.context, volume_id)
            self.fail("Should have thrown TypeError")
        except TypeError:
            pass

    @defer.inlineCallbacks
    def test_too_many_volumes(self):
        vol_size = '1'
        user_id = 'fake'
        project_id = 'fake'
        num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
        total_slots = FLAGS.blades_per_shelf * num_shelves
        """Ensure that NoMoreBlades is raised when we run out of volumes"""
        vols = []
        from nova import datastore
        redis = datastore.Redis.instance()
        for i in xrange(total_slots):
            vid = yield self.volume.create_volume(vol_size, user_id, project_id)
            vols.append(vid)
        self.assertFailure(self.volume.create_volume(vol_size,
                                                     user_id,
                                                     project_id),
                           volume_service.NoMoreBlades)
        for id in vols:
            yield self.volume.delete_volume(id)
        total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
        for _index in xrange(total_slots):
            volume_id = self._create_volume()
            yield self.volume.create_volume(self.context, volume_id)
            vols.append(volume_id)
        volume_id = self._create_volume()
        self.assertFailure(self.volume.create_volume(self.context,
                                                     volume_id),
                           db.NoMoreBlades)
        db.volume_destroy(None, volume_id)
        for volume_id in vols:
            yield self.volume.delete_volume(self.context, volume_id)

    @defer.inlineCallbacks
    def test_run_attach_detach_volume(self):
        # Create one volume and one compute to test with
        instance_id = "storage-test"
        vol_size = "5"
        user_id = "fake"
        project_id = 'fake'
        """Make sure volume can be attached and detached from instance"""
        inst = {}
        inst['image_id'] = 'ami-test'
        inst['reservation_id'] = 'r-fakeres'
        inst['launch_time'] = '10'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type'] = 'm1.tiny'
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        instance_id = db.instance_create(self.context, inst)['id']
        mountpoint = "/dev/sdf"
        volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
        volume_obj = volume_service.get_volume(volume_id)
        volume_obj.start_attach(instance_id, mountpoint)
        volume_id = self._create_volume()
        yield self.volume.create_volume(self.context, volume_id)
        if FLAGS.fake_tests:
            volume_obj.finish_attach()
            db.volume_attached(None, volume_id, instance_id, mountpoint)
        else:
            rv = yield self.compute.attach_volume(instance_id,
                                                  volume_id,
                                                  mountpoint)
        self.assertEqual(volume_obj['status'], "in-use")
        self.assertEqual(volume_obj['attach_status'], "attached")
        self.assertEqual(volume_obj['instance_id'], instance_id)
        self.assertEqual(volume_obj['mountpoint'], mountpoint)
            yield self.compute.attach_volume(instance_id,
                                             volume_id,
                                             mountpoint)
        vol = db.volume_get(None, volume_id)
        self.assertEqual(vol['status'], "in-use")
        self.assertEqual(vol['attach_status'], "attached")
        self.assertEqual(vol['mountpoint'], mountpoint)
        instance_ref = db.volume_get_instance(self.context, volume_id)
        self.assertEqual(instance_ref['id'], instance_id)

        self.assertFailure(self.volume.delete_volume(volume_id), exception.Error)
        volume_obj.start_detach()
        self.assertFailure(self.volume.delete_volume(self.context, volume_id),
                           exception.Error)
        if FLAGS.fake_tests:
            volume_obj.finish_detach()
            db.volume_detached(None, volume_id)
        else:
            rv = yield self.volume.detach_volume(instance_id,
                                                 volume_id)
        volume_obj = volume_service.get_volume(volume_id)
        self.assertEqual(volume_obj['status'], "available")
            yield self.compute.detach_volume(instance_id,
                                             volume_id)
        vol = db.volume_get(None, volume_id)
        self.assertEqual(vol['status'], "available")

        rv = self.volume.delete_volume(volume_id)
        yield self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.Error,
                          volume_service.get_volume,
                          db.volume_get,
                          None,
                          volume_id)
        db.instance_destroy(self.context, instance_id)

    def test_multiple_volume_race_condition(self):
        vol_size = "5"
        user_id = "fake"
        project_id = 'fake'
    @defer.inlineCallbacks
    def test_concurrent_volumes_get_different_blades(self):
        """Ensure multiple concurrent volumes get different blades"""
        volume_ids = []
        shelf_blades = []

        def _check(volume_id):
            vol = volume_service.get_volume(volume_id)
            shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id'])
            self.assertTrue(shelf_blade not in shelf_blades,
                            "Same shelf/blade tuple came back twice")
            """Make sure blades aren't duplicated"""
            volume_ids.append(volume_id)
            (shelf_id, blade_id) = db.volume_get_shelf_and_blade(None,
                                                                 volume_id)
            shelf_blade = '%s.%s' % (shelf_id, blade_id)
            self.assert_(shelf_blade not in shelf_blades)
            shelf_blades.append(shelf_blade)
            logging.debug("got %s" % shelf_blade)
            return vol
            logging.debug("Blade %s allocated", shelf_blade)
        deferreds = []
        for i in range(5):
            d = self.volume.create_volume(vol_size, user_id, project_id)
        total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
        for _index in xrange(total_slots):
            volume_id = self._create_volume()
            d = self.volume.create_volume(self.context, volume_id)
            d.addCallback(_check)
            d.addErrback(self.fail)
            deferreds.append(d)
        def destroy_volumes(retvals):
            overall_succes = True
            for success, volume in retvals:
                if not success:
                    overall_succes = False
                else:
                    volume.destroy()
            self.assertTrue(overall_succes)
        d = defer.DeferredList(deferreds)
        d.addCallback(destroy_volumes)
        return d
        yield defer.DeferredList(deferreds)
        for volume_id in volume_ids:
            self.volume.delete_volume(self.context, volume_id)
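
(The rewritten test fans out create_volume calls as deferreds and joins them
with defer.DeferredList. A standalone Twisted sketch of that fan-out/join
pattern, independent of nova and with made-up values:)

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fan_out():
        deferreds = []
        for index in range(3):
            d = defer.succeed(index)                 # stands in for create_volume
            d.addCallback(lambda value: value * 10)  # stands in for _check
            deferreds.append(d)
        results = yield defer.DeferredList(deferreds)
        # results is a list of (success, value) pairs, e.g. (True, 0)
        defer.returnValue(results)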

    def test_multi_node(self):
        # TODO(termie): Figure out how to test with two nodes,

@@ -55,11 +55,11 @@ from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.compute_unittest import *
from nova.tests.flags_unittest import *
from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.rpc_unittest import *
from nova.tests.service_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.volume_unittest import *