Merge trunk
@@ -44,18 +44,20 @@ flags.DECLARE('auth_driver', 'nova.auth.manager')
flags.DECLARE('redis_db', 'nova.datastore')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')


def add_lease(_mac, ip_address, _hostname, _interface):
def add_lease(mac, ip_address, _hostname, _interface):
    """Set the IP that was assigned by the DHCP server."""
    if FLAGS.fake_rabbit:
        logging.debug("leasing ip")
        network_manager = utils.import_object(FLAGS.network_manager)
        network_manager.lease_fixed_ip(None, ip_address)
        network_manager.lease_fixed_ip(None, mac, ip_address)
    else:
        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host),
                 {"method": "lease_fixed_ip",
                  "args": {"context": None,
                           "mac": mac,
                           "address": ip_address}})


@@ -64,16 +66,17 @@ def old_lease(_mac, _ip_address, _hostname, _interface):
    logging.debug("Adopted old lease or got a change of mac/hostname")


def del_lease(_mac, ip_address, _hostname, _interface):
def del_lease(mac, ip_address, _hostname, _interface):
    """Called when a lease expires."""
    if FLAGS.fake_rabbit:
        logging.debug("releasing ip")
        network_manager = utils.import_object(FLAGS.network_manager)
        network_manager.release_fixed_ip(None, ip_address)
        network_manager.release_fixed_ip(None, mac, ip_address)
    else:
        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host),
                 {"method": "release_fixed_ip",
                  "args": {"context": None,
                           "mac": mac,
                           "address": ip_address}})

@@ -17,6 +17,37 @@
# License for the specific language governing permissions and limitations
# under the License.

# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#     1. Redistributions of source code must retain the above copyright notice,
#        this list of conditions and the following disclaimer.
#
#     2. Redistributions in binary form must reproduce the above copyright
#        notice, this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
#
#     3. Neither the name of Django nor the names of its contributors may be used
#        to endorse or promote products derived from this software without
#        specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""
CLI interface for nova management.
Connects to the running ADMIN api in the api daemon.
@@ -26,6 +57,8 @@ import os
import sys
import time

import IPy

# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -101,6 +134,29 @@ class VpnCommands(object):
        self.pipe.launch_vpn_instance(project_id)


class ShellCommands(object):
    def run(self):
        "Runs a Python interactive interpreter. Tries to use IPython, if it's available."
        try:
            import IPython
            # Explicitly pass an empty list as arguments, because otherwise IPython
            # would use sys.argv from this script.
            shell = IPython.Shell.IPShell(argv=[])
            shell.mainloop()
        except ImportError:
            import code
            try: # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try', because
                # we already know 'readline' was imported successfully.
                import rlcompleter
                readline.parse_and_bind("tab:complete")
            code.interact()


class RoleCommands(object):
    """Class for managing roles."""

@@ -218,12 +274,45 @@ class ProjectCommands(object):
        with open(filename, 'w') as f:
            f.write(zip_file)

class FloatingIpCommands(object):
    """Class for managing floating ip."""

    def create(self, host, range):
        """Creates floating ips for host by range
        arguments: host ip_range"""
        for address in IPy.IP(range):
            db.floating_ip_create(None, {'address': str(address),
                                         'host': host})

    def delete(self, ip_range):
        """Deletes floating ips by range
        arguments: range"""
        for address in IPy.IP(ip_range):
            db.floating_ip_destroy(None, str(address))


    def list(self, host=None):
        """Lists all floating ips (optionally by host)
        arguments: [host]"""
        if host == None:
            floating_ips = db.floating_ip_get_all(None)
        else:
            floating_ips = db.floating_ip_get_all_by_host(None, host)
        for floating_ip in floating_ips:
            instance = None
            if floating_ip['fixed_ip']:
                instance = floating_ip['fixed_ip']['instance']['str_id']
            print "%s\t%s\t%s" % (floating_ip['host'],
                                  floating_ip['address'],
                                  instance)

CATEGORIES = [
    ('user', UserCommands),
    ('project', ProjectCommands),
    ('role', RoleCommands),
    ('shell', ShellCommands),
    ('vpn', VpnCommands),
    ('floating', FloatingIpCommands)
]

bin/nova-scheduler (new executable file, 43 lines)
@@ -0,0 +1,43 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Twistd daemon for the nova scheduler nodes.
"""

import os
import sys

# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                   os.pardir,
                                   os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

from nova import service
from nova import twistd


if __name__ == '__main__':
    twistd.serve(__file__)

if __name__ == '__builtin__':
    application = service.Service.create()
@@ -33,6 +33,7 @@ SCOPE_ONELEVEL = 1 # not implemented
SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
MOD_REPLACE = 2


class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
@@ -175,7 +176,7 @@ class FakeLDAP(object):
        Args:
            dn -- a dn
            attrs -- a list of tuples in the following form:
                ([MOD_ADD | MOD_DELETE], attribute, value)
                ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)

        """
        redis = datastore.Redis.instance()
@@ -185,6 +186,8 @@ class FakeLDAP(object):
            values = _from_json(redis.hget(key, k))
            if cmd == MOD_ADD:
                values.append(v)
            elif cmd == MOD_REPLACE:
                values = [v]
            else:
                values.remove(v)
            values = redis.hset(key, k, _to_json(values))
@@ -99,13 +99,6 @@ class LdapDriver(object):
        dn = FLAGS.ldap_user_subtree
        return self.__to_user(self.__find_object(dn, query))

    def get_key_pair(self, uid, key_name):
        """Retrieve key pair by uid and key name"""
        dn = 'cn=%s,%s' % (key_name,
                           self.__uid_to_dn(uid))
        attr = self.__find_object(dn, '(objectclass=novaKeyPair)')
        return self.__to_key_pair(uid, attr)

    def get_project(self, pid):
        """Retrieve project by id"""
        dn = 'cn=%s,%s' % (pid,
@@ -119,12 +112,6 @@ class LdapDriver(object):
                                     '(objectclass=novaUser)')
        return [self.__to_user(attr) for attr in attrs]

    def get_key_pairs(self, uid):
        """Retrieve list of key pairs"""
        attrs = self.__find_objects(self.__uid_to_dn(uid),
                                    '(objectclass=novaKeyPair)')
        return [self.__to_key_pair(uid, attr) for attr in attrs]

    def get_projects(self, uid=None):
        """Retrieve list of projects"""
        pattern = '(objectclass=novaProject)'
@@ -154,21 +141,6 @@ class LdapDriver(object):
        self.conn.add_s(self.__uid_to_dn(name), attr)
        return self.__to_user(dict(attr))

    def create_key_pair(self, uid, key_name, public_key, fingerprint):
        """Create a key pair"""
        # TODO(vish): possibly refactor this to store keys in their own ou
        #   and put dn reference in the user object
        attr = [
            ('objectclass', ['novaKeyPair']),
            ('cn', [key_name]),
            ('sshPublicKey', [public_key]),
            ('keyFingerprint', [fingerprint]),
        ]
        self.conn.add_s('cn=%s,%s' % (key_name,
                                      self.__uid_to_dn(uid)),
                        attr)
        return self.__to_key_pair(uid, dict(attr))

    def create_project(self, name, manager_uid,
                       description=None, member_uids=None):
        """Create a project"""
@@ -202,6 +174,24 @@ class LdapDriver(object):
        self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
        return self.__to_project(dict(attr))

    def modify_project(self, project_id, manager_uid=None, description=None):
        """Modify an existing project"""
        if not manager_uid and not description:
            return
        attr = []
        if manager_uid:
            if not self.__user_exists(manager_uid):
                raise exception.NotFound("Project can't be modified because "
                                         "manager %s doesn't exist" %
                                         manager_uid)
            manager_dn = self.__uid_to_dn(manager_uid)
            attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn))
        if description:
            attr.append((self.ldap.MOD_REPLACE, 'description', description))
        self.conn.modify_s('cn=%s,%s' % (project_id,
                                         FLAGS.ldap_project_subtree),
                           attr)

    def add_to_project(self, uid, project_id):
        """Add user to project"""
        dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
@@ -265,19 +255,10 @@ class LdapDriver(object):
        """Delete a user"""
        if not self.__user_exists(uid):
            raise exception.NotFound("User %s doesn't exist" % uid)
        self.__delete_key_pairs(uid)
        self.__remove_from_all(uid)
        self.conn.delete_s('uid=%s,%s' % (uid,
                                          FLAGS.ldap_user_subtree))

    def delete_key_pair(self, uid, key_name):
        """Delete a key pair"""
        if not self.__key_pair_exists(uid, key_name):
            raise exception.NotFound("Key Pair %s doesn't exist for user %s" %
                                     (key_name, uid))
        self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
                                                FLAGS.ldap_user_subtree))

    def delete_project(self, project_id):
        """Delete a project"""
        project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
@@ -288,10 +269,6 @@ class LdapDriver(object):
        """Check if user exists"""
        return self.get_user(uid) != None

    def __key_pair_exists(self, uid, key_name):
        """Check if key pair exists"""
        return self.get_key_pair(uid, key_name) != None

    def __project_exists(self, project_id):
        """Check if project exists"""
        return self.get_project(project_id) != None
@@ -341,13 +318,6 @@ class LdapDriver(object):
        """Check if group exists"""
        return self.__find_object(dn, '(objectclass=groupOfNames)') != None

    def __delete_key_pairs(self, uid):
        """Delete all key pairs for user"""
        keys = self.get_key_pairs(uid)
        if keys != None:
            for key in keys:
                self.delete_key_pair(uid, key['name'])

    @staticmethod
    def __role_to_dn(role, project_id=None):
        """Convert role to corresponding dn"""
@@ -472,18 +442,6 @@ class LdapDriver(object):
                'secret': attr['secretKey'][0],
                'admin': (attr['isAdmin'][0] == 'TRUE')}

    @staticmethod
    def __to_key_pair(owner, attr):
        """Convert ldap attributes to KeyPair object"""
        if attr == None:
            return None
        return {
            'id': attr['cn'][0],
            'name': attr['cn'][0],
            'owner_id': owner,
            'public_key': attr['sshPublicKey'][0],
            'fingerprint': attr['keyFingerprint'][0]}

    def __to_project(self, attr):
        """Convert ldap attributes to Project object"""
        if attr == None:
@@ -128,24 +128,6 @@ class User(AuthBase):
    def is_project_manager(self, project):
        return AuthManager().is_project_manager(self, project)

    def generate_key_pair(self, name):
        return AuthManager().generate_key_pair(self.id, name)

    def create_key_pair(self, name, public_key, fingerprint):
        return AuthManager().create_key_pair(self.id,
                                             name,
                                             public_key,
                                             fingerprint)

    def get_key_pair(self, name):
        return AuthManager().get_key_pair(self.id, name)

    def delete_key_pair(self, name):
        return AuthManager().delete_key_pair(self.id, name)

    def get_key_pairs(self):
        return AuthManager().get_key_pairs(self.id)

    def __repr__(self):
        return "User('%s', '%s', '%s', '%s', %s)" % (self.id,
                                                     self.name,
@@ -154,29 +136,6 @@ class User(AuthBase):
                                                     self.admin)


class KeyPair(AuthBase):
    """Represents an ssh key returned from the datastore

    Even though this object is named KeyPair, only the public key and
    fingerprint is stored. The user's private key is not saved.
    """

    def __init__(self, id, name, owner_id, public_key, fingerprint):
        AuthBase.__init__(self)
        self.id = id
        self.name = name
        self.owner_id = owner_id
        self.public_key = public_key
        self.fingerprint = fingerprint

    def __repr__(self):
        return "KeyPair('%s', '%s', '%s', '%s', '%s')" % (self.id,
                                                          self.name,
                                                          self.owner_id,
                                                          self.public_key,
                                                          self.fingerprint)


class Project(AuthBase):
    """Represents a Project returned from the datastore"""

@@ -539,6 +498,26 @@ class AuthManager(object):
        db.security_group_create({}, values)
        return project

    def modify_project(self, project, manager_user=None, description=None):
        """Modify a project

        @type name: Project or project_id
        @param project: The project to modify.

        @type manager_user: User or uid
        @param manager_user: This user will be the new project manager.

        @type description: str
        @param project: This will be the new description of the project.

        """
        if manager_user:
            manager_user = User.safe_id(manager_user)
        with self.driver() as drv:
            drv.modify_project(Project.safe_id(project),
                               manager_user,
                               description)

    def add_to_project(self, user, project):
        """Add user to project"""
        with self.driver() as drv:
@@ -659,67 +638,13 @@ class AuthManager(object):
        return User(**user_dict)

    def delete_user(self, user):
        """Deletes a user"""
        with self.driver() as drv:
            drv.delete_user(User.safe_id(user))
        """Deletes a user

    def generate_key_pair(self, user, key_name):
        """Generates a key pair for a user

        Generates a public and private key, stores the public key using the
        key_name, and returns the private key and fingerprint.

        @type user: User or uid
        @param user: User for which to create key pair.

        @type key_name: str
        @param key_name: Name to use for the generated KeyPair.

        @rtype: tuple (private_key, fingerprint)
        @return: A tuple containing the private_key and fingerprint.
        """
        # NOTE(vish): generating key pair is slow so check for legal
        #             creation before creating keypair
        Additionally deletes all users key_pairs"""
        uid = User.safe_id(user)
        db.key_pair_destroy_all_by_user(None, uid)
        with self.driver() as drv:
            if not drv.get_user(uid):
                raise exception.NotFound("User %s doesn't exist" % user)
            if drv.get_key_pair(uid, key_name):
                raise exception.Duplicate("The keypair %s already exists"
                                          % key_name)
            private_key, public_key, fingerprint = crypto.generate_key_pair()
            self.create_key_pair(uid, key_name, public_key, fingerprint)
            return private_key, fingerprint

    def create_key_pair(self, user, key_name, public_key, fingerprint):
        """Creates a key pair for user"""
        with self.driver() as drv:
            kp_dict = drv.create_key_pair(User.safe_id(user),
                                          key_name,
                                          public_key,
                                          fingerprint)
            if kp_dict:
                return KeyPair(**kp_dict)

    def get_key_pair(self, user, key_name):
        """Retrieves a key pair for user"""
        with self.driver() as drv:
            kp_dict = drv.get_key_pair(User.safe_id(user), key_name)
            if kp_dict:
                return KeyPair(**kp_dict)

    def get_key_pairs(self, user):
        """Retrieves all key pairs for user"""
        with self.driver() as drv:
            kp_list = drv.get_key_pairs(User.safe_id(user))
            if not kp_list:
                return []
            return [KeyPair(**kp_dict) for kp_dict in kp_list]

    def delete_key_pair(self, user, key_name):
        """Deletes a key pair for user"""
        with self.driver() as drv:
            drv.delete_key_pair(User.safe_id(user), key_name)
            drv.delete_user(uid)

    def get_credentials(self, user, project=None):
        """Get credential zip for user in project"""
@@ -304,7 +304,10 @@ class APIRequestHandler(tornado.web.RequestHandler):
        try:
            failure.raiseException()
        except exception.ApiError as ex:
            self._error(type(ex).__name__ + "." + ex.code, ex.message)
            if ex.code:
                self._error(ex.code, ex.message)
            else:
                self._error(type(ex).__name__, ex.message)
        # TODO(vish): do something more useful with unknown exceptions
        except Exception as ex:
            self._error(type(ex).__name__, str(ex))
@@ -23,6 +23,7 @@ datastore.
"""

import base64
import datetime
import logging
import os
import time
@@ -31,13 +32,14 @@ import IPy

from twisted.internet import defer

from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import rpc
from nova import utils
from nova.auth import rbac
from nova.auth import manager
from nova.compute.instance_types import INSTANCE_TYPES
from nova.endpoint import images

@@ -47,14 +49,35 @@ flags.DECLARE('storage_availability_zone', 'nova.volume.manager')

InvalidInputException = exception.InvalidInputException

def _gen_key(user_id, key_name):
    """ Tuck this into AuthManager """
class QuotaError(exception.ApiError):
    """Quota Exceeeded"""
    pass


def _gen_key(context, user_id, key_name):
    """Generate a key

    This is a module level method because it is slow and we need to defer
    it into a process pool."""
    try:
        mgr = manager.AuthManager()
        private_key, fingerprint = mgr.generate_key_pair(user_id, key_name)
        # NOTE(vish): generating key pair is slow so check for legal
        #             creation before creating key_pair
        try:
            db.key_pair_get(context, user_id, key_name)
            raise exception.Duplicate("The key_pair %s already exists"
                                      % key_name)
        except exception.NotFound:
            pass
        private_key, public_key, fingerprint = crypto.generate_key_pair()
        key = {}
        key['user_id'] = user_id
        key['name'] = key_name
        key['public_key'] = public_key
        key['fingerprint'] = fingerprint
        db.key_pair_create(context, key)
        return {'private_key': private_key, 'fingerprint': fingerprint}
    except Exception as ex:
        return {'exception': ex}
    return {'private_key': private_key, 'fingerprint': fingerprint}


class CloudController(object):
@@ -87,13 +110,15 @@ class CloudController(object):

    def _get_mpi_data(self, project_id):
        result = {}
        for instance in db.instance_get_by_project(project_id):
            line = '%s slots=%d' % (instance.fixed_ip['str_id'],
                                    INSTANCE_TYPES[instance['instance_type']]['vcpus'])
            if instance['key_name'] in result:
                result[instance['key_name']].append(line)
            else:
                result[instance['key_name']] = [line]
        for instance in db.instance_get_by_project(None, project_id):
            if instance['fixed_ip']:
                line = '%s slots=%d' % (instance['fixed_ip']['str_id'],
                                        INSTANCE_TYPES[instance['instance_type']]['vcpus'])
                key = str(instance['key_name'])
                if key in result:
                    result[key].append(line)
                else:
                    result[key] = [line]
        return result

    def _trigger_refresh_security_group(self, security_group):
@@ -119,13 +144,13 @@ class CloudController(object):
        else:
            keys = ''
        hostname = instance_ref['hostname']
        floating_ip = db.instance_get_floating_ip_address(None,
                                                          instance_ref['id'])
        floating_ip = db.instance_get_floating_address(None,
                                                       instance_ref['id'])
        data = {
            'user-data': base64.b64decode(instance_ref['user_data']),
            'meta-data': {
                'ami-id': instance_ref['image_id'],
                'ami-launch-index': instance_ref['ami_launch_index'],
                'ami-launch-index': instance_ref['launch_index'],
                'ami-manifest-path': 'FIXME',
                'block-device-mapping': { # TODO(vish): replace with real data
                    'ami': 'sda1',
@@ -141,7 +166,7 @@ class CloudController(object):
                'local-ipv4': address,
                'kernel-id': instance_ref['kernel_id'],
                'placement': {
                    'availaibility-zone': instance_ref['availability_zone'],
                    'availability-zone': 'nova' # TODO(vish): real zone
                },
                'public-hostname': hostname,
                'public-ipv4': floating_ip or '',
@@ -165,9 +190,18 @@ class CloudController(object):

    @rbac.allow('all')
    def describe_regions(self, context, region_name=None, **kwargs):
        # TODO(vish): region_name is an array. Support filtering
        return {'regionInfo': [{'regionName': 'nova',
                                'regionUrl': FLAGS.ec2_url}]}
        if FLAGS.region_list:
            regions = []
            for region in FLAGS.region_list:
                name, _sep, url = region.partition('=')
                regions.append({'regionName': name,
                                'regionEndpoint': url})
        else:
            regions = [{'regionName': 'nova',
                        'regionEndpoint': FLAGS.ec2_url}]
        if region_name:
            regions = [r for r in regions if r['regionName'] in region_name]
        return {'regionInfo': regions }

    @rbac.allow('all')
    def describe_snapshots(self,
@@ -187,18 +221,18 @@ class CloudController(object):

    @rbac.allow('all')
    def describe_key_pairs(self, context, key_name=None, **kwargs):
        key_pairs = context.user.get_key_pairs()
        key_pairs = db.key_pair_get_all_by_user(context, context.user.id)
        if not key_name is None:
            key_pairs = [x for x in key_pairs if x.name in key_name]
            key_pairs = [x for x in key_pairs if x['name'] in key_name]

        result = []
        for key_pair in key_pairs:
            # filter out the vpn keys
            suffix = FLAGS.vpn_key_suffix
            if context.user.is_admin() or not key_pair.name.endswith(suffix):
            if context.user.is_admin() or not key_pair['name'].endswith(suffix):
                result.append({
                    'keyName': key_pair.name,
                    'keyFingerprint': key_pair.fingerprint,
                    'keyName': key_pair['name'],
                    'keyFingerprint': key_pair['fingerprint'],
                })

        return {'keypairsSet': result}
@@ -214,14 +248,18 @@ class CloudController(object):
            dcall.callback({'keyName': key_name,
                            'keyFingerprint': kwargs['fingerprint'],
                            'keyMaterial': kwargs['private_key']})
        pool.apply_async(_gen_key, [context.user.id, key_name],
        # TODO(vish): when context is no longer an object, pass it here
        pool.apply_async(_gen_key, [None, context.user.id, key_name],
                         callback=_complete)
        return dcall

    @rbac.allow('all')
    def delete_key_pair(self, context, key_name, **kwargs):
        context.user.delete_key_pair(key_name)
        # aws returns true even if the key doens't exist
        try:
            db.key_pair_destroy(context, context.user.id, key_name)
        except exception.NotFound:
            # aws returns true even if the key doesn't exist
            pass
        return True

    @rbac.allow('all')
@@ -419,7 +457,7 @@ class CloudController(object):
            v['status'] = '%s (%s, %s, %s, %s)' % (
                volume['status'],
                volume['user_id'],
                'host',
                volume['host'],
                volume['instance_id'],
                volume['mountpoint'])
        if volume['attach_status'] == 'attached':
@@ -435,6 +473,14 @@ class CloudController(object):

    @rbac.allow('projectmanager', 'sysadmin')
    def create_volume(self, context, size, **kwargs):
        # check quota
        size = int(size)
        if quota.allowed_volumes(context, 1, size) < 1:
            logging.warn("Quota exceeeded for %s, tried to create %sG volume",
                         context.project.id, size)
            raise QuotaError("Volume quota exceeded. You cannot "
                             "create a volume of size %s" %
                             size)
        vol = {}
        vol['size'] = size
        vol['user_id'] = context.user.id
@@ -444,9 +490,11 @@ class CloudController(object):
        vol['attach_status'] = "detached"
        volume_ref = db.volume_create(context, vol)

        rpc.cast(FLAGS.volume_topic, {"method": "create_volume",
                                      "args": {"context": None,
                                               "volume_id": volume_ref['id']}})
        rpc.cast(FLAGS.scheduler_topic,
                 {"method": "create_volume",
                  "args": {"context": None,
                           "topic": FLAGS.volume_topic,
                           "volume_id": volume_ref['id']}})

        return {'volumeSet': [self._format_volume(context, volume_ref)]}

@@ -455,10 +503,12 @@ class CloudController(object):
    def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
        volume_ref = db.volume_get_by_str(context, volume_id)
        # TODO(vish): abstract status checking?
        if volume_ref['status'] != "available":
            raise exception.ApiError("Volume status must be available")
        if volume_ref['attach_status'] == "attached":
            raise exception.ApiError("Volume is already attached")
        instance_ref = db.instance_get_by_str(context, instance_id)
        host = db.instance_get_host(context, instance_ref['id'])
        host = instance_ref['host']
        rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "attach_volume",
                  "args": {"context": None,
@@ -477,12 +527,12 @@ class CloudController(object):
        volume_ref = db.volume_get_by_str(context, volume_id)
        instance_ref = db.volume_get_instance(context, volume_ref['id'])
        if not instance_ref:
            raise exception.Error("Volume isn't attached to anything!")
            raise exception.ApiError("Volume isn't attached to anything!")
        # TODO(vish): abstract status checking?
        if volume_ref['status'] == "available":
            raise exception.Error("Volume is already detached")
            raise exception.ApiError("Volume is already detached")
        try:
            host = db.instance_get_host(context, instance_ref['id'])
            host = instance_ref['host']
            rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                     {"method": "detach_volume",
                      "args": {"context": None,
@@ -521,12 +571,14 @@ class CloudController(object):
    def _format_instances(self, context, reservation_id=None):
        reservations = {}
        if reservation_id:
            instances = db.instance_get_by_reservation(context, reservation_id)
            instances = db.instance_get_by_reservation(context,
                                                       reservation_id)
        else:
            if not context.user.is_admin():
            if context.user.is_admin():
                instances = db.instance_get_all(context)
            else:
                instances = db.instance_get_by_project(context, context.project.id)
                instances = db.instance_get_by_project(context,
                                                       context.project.id)
        for instance in instances:
            if not context.user.is_admin():
                if instance['image_id'] == FLAGS.vpn_image_id:
@@ -538,15 +590,16 @@ class CloudController(object):
                'code': instance['state'],
                'name': instance['state_description']
            }
            floating_addr = db.instance_get_floating_address(context,
                                                             instance['id'])
            i['publicDnsName'] = floating_addr
            fixed_addr = db.instance_get_fixed_address(context,
                                                       instance['id'])
            fixed_addr = None
            floating_addr = None
            if instance['fixed_ip']:
                fixed_addr = instance['fixed_ip']['str_id']
                if instance['fixed_ip']['floating_ips']:
                    fixed = instance['fixed_ip']
                    floating_addr = fixed['floating_ips'][0]['str_id']
            i['privateDnsName'] = fixed_addr
            if not i['publicDnsName']:
                i['publicDnsName'] = i['privateDnsName']
            i['dnsName'] = None
            i['publicDnsName'] = floating_addr
            i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
            i['keyName'] = instance['key_name']
            if context.user.is_admin():
                i['keyName'] = '%s (%s, %s)' % (i['keyName'],
@@ -579,10 +632,13 @@ class CloudController(object):
        iterator = db.floating_ip_get_by_project(context,
                                                 context.project.id)
        for floating_ip_ref in iterator:
            address = floating_ip_ref['id_str']
            instance_ref = db.floating_ip_get_instance(address)
            address = floating_ip_ref['str_id']
            instance_id = None
            if (floating_ip_ref['fixed_ip']
                and floating_ip_ref['fixed_ip']['instance']):
                instance_id = floating_ip_ref['fixed_ip']['instance']['str_id']
            address_rv = {'public_ip': address,
                          'instance_id': instance_ref['id_str']}
                          'instance_id': instance_id}
            if context.user.is_admin():
                details = "%s (%s)" % (address_rv['instance_id'],
                                       floating_ip_ref['project_id'])
@@ -593,6 +649,12 @@ class CloudController(object):
    @rbac.allow('netadmin')
    @defer.inlineCallbacks
    def allocate_address(self, context, **kwargs):
        # check quota
        if quota.allowed_floating_ips(context, 1) < 1:
            logging.warn("Quota exceeeded for %s, tried to allocate address",
                         context.project.id)
            raise QuotaError("Address quota exceeded. You cannot "
                             "allocate any more addresses")
        network_topic = yield self._get_network_topic(context)
        public_ip = yield rpc.call(network_topic,
                                   {"method": "allocate_floating_ip",
@@ -607,9 +669,9 @@ class CloudController(object):
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = yield self._get_network_topic(context)
        rpc.cast(network_topic,
                 {"method": "deallocate_floating_ip",
                  "args": {"context": None,
                           "floating_ip": floating_ip_ref['str_id']}})
                 {"method": "deallocate_floating_ip",
                  "args": {"context": None,
                           "floating_address": floating_ip_ref['str_id']}})
        defer.returnValue({'releaseResponse': ["Address released."]})

    @rbac.allow('netadmin')
@@ -620,11 +682,10 @@ class CloudController(object):
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = yield self._get_network_topic(context)
        rpc.cast(network_topic,
                 {"method": "associate_floating_ip",
                  "args": {"context": None,
                           "floating_ip": floating_ip_ref['str_id'],
                           "fixed_ip": fixed_ip_ref['str_id'],
                           "instance_id": instance_ref['id']}})
                 {"method": "associate_floating_ip",
                  "args": {"context": None,
                           "floating_address": floating_ip_ref['str_id'],
                           "fixed_address": fixed_ip_ref['str_id']}})
        defer.returnValue({'associateResponse': ["Address associated."]})

    @rbac.allow('netadmin')
@@ -633,26 +694,42 @@ class CloudController(object):
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = yield self._get_network_topic(context)
        rpc.cast(network_topic,
                 {"method": "disassociate_floating_ip",
                  "args": {"context": None,
                           "floating_ip": floating_ip_ref['str_id']}})
                 {"method": "disassociate_floating_ip",
                  "args": {"context": None,
                           "floating_address": floating_ip_ref['str_id']}})
        defer.returnValue({'disassociateResponse': ["Address disassociated."]})

    @defer.inlineCallbacks
    def _get_network_topic(self, context):
        """Retrieves the network host for a project"""
        network_ref = db.project_get_network(context, context.project.id)
        host = db.network_get_host(context, network_ref['id'])
        host = network_ref['host']
        if not host:
            host = yield rpc.call(FLAGS.network_topic,
                                  {"method": "set_network_host",
                                   "args": {"context": None,
                                            "project_id": context.project.id}})
                                  {"method": "set_network_host",
                                   "args": {"context": None,
                                            "project_id": context.project.id}})
        defer.returnValue(db.queue_get_for(context, FLAGS.network_topic, host))

    @rbac.allow('projectmanager', 'sysadmin')
    @defer.inlineCallbacks
    def run_instances(self, context, **kwargs):
        instance_type = kwargs.get('instance_type', 'm1.small')
        if instance_type not in INSTANCE_TYPES:
            raise exception.ApiError("Unknown instance type: %s",
                                     instance_type)
        # check quota
        max_instances = int(kwargs.get('max_count', 1))
        min_instances = int(kwargs.get('min_count', max_instances))
        num_instances = quota.allowed_instances(context,
                                                max_instances,
                                                instance_type)
        if num_instances < min_instances:
            logging.warn("Quota exceeeded for %s, tried to run %s instances",
                         context.project.id, min_instances)
            raise QuotaError("Instance quota exceeded. You can only "
                             "run %s more instances of this type." %
                             num_instances, "InstanceLimitExceeded")
        # make sure user can access the image
        # vpn image is private so it doesn't show up on lists
        vpn = kwargs['image_id'] == FLAGS.vpn_image_id
@@ -674,15 +751,14 @@ class CloudController(object):
        images.get(context, kernel_id)
        images.get(context, ramdisk_id)

        logging.debug("Going to run instances...")
        logging.debug("Going to run %s instances...", num_instances)
        launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
        key_data = None
        if kwargs.has_key('key_name'):
            key_pair = context.user.get_key_pair(kwargs['key_name'])
            if not key_pair:
                raise exception.ApiError('Key Pair %s not found' %
                                         kwargs['key_name'])
            key_data = key_pair.public_key
            key_pair_ref = db.key_pair_get(context,
                                           context.user.id,
                                           kwargs['key_name'])
            key_data = key_pair_ref['public_key']

        security_group_arg = kwargs.get('security_group', ["default"])
        if not type(security_group_arg) is list:
@@ -697,6 +773,7 @@ class CloudController(object):

        reservation_id = utils.generate_uid('r')
        base_options = {}
        base_options['state_description'] = 'scheduling'
        base_options['image_id'] = image_id
        base_options['kernel_id'] = kernel_id
        base_options['ramdisk_id'] = ramdisk_id
@@ -706,10 +783,15 @@ class CloudController(object):
        base_options['user_id'] = context.user.id
        base_options['project_id'] = context.project.id
        base_options['user_data'] = kwargs.get('user_data', '')
        base_options['instance_type'] = kwargs.get('instance_type', 'm1.small')

        for num in range(int(kwargs['max_count'])):
            inst_id = db.instance_create(context, base_options)
        type_data = INSTANCE_TYPES[instance_type]
        base_options['memory_mb'] = type_data['memory_mb']
        base_options['vcpus'] = type_data['vcpus']
        base_options['local_gb'] = type_data['local_gb']

        for num in range(num_instances):
            instance_ref = db.instance_create(context, base_options)
            inst_id = instance_ref['id']

            for security_group_id in security_groups:
                db.instance_add_security_group(context, inst_id,
@@ -718,7 +800,7 @@ class CloudController(object):
            inst = {}
            inst['mac_address'] = utils.generate_mac()
            inst['launch_index'] = num
            inst['hostname'] = inst_id
            inst['hostname'] = instance_ref['str_id']
            db.instance_update(context, inst_id, inst)
            address = self.network_manager.allocate_fixed_ip(context,
                                                             inst_id,
@@ -732,11 +814,12 @@ class CloudController(object):
                              "args": {"context": None,
                                       "address": address}})

            rpc.cast(FLAGS.compute_topic,
                     {"method": "run_instance",
                      "args": {"context": None,
                               "instance_id": inst_id}})
            logging.debug("Casting to node for %s/%s's instance %s" %
            rpc.cast(FLAGS.scheduler_topic,
                     {"method": "run_instance",
                      "args": {"context": None,
                               "topic": FLAGS.compute_topic,
                               "instance_id": inst_id}})
            logging.debug("Casting to scheduler for %s/%s's instance %s" %
                          (context.project.name, context.user.name, inst_id))
        defer.returnValue(self._format_run_instances(context,
                                                     reservation_id))
@@ -755,6 +838,10 @@ class CloudController(object):
                            % id_str)
                continue

            now = datetime.datetime.utcnow()
            db.instance_update(context,
                               instance_ref['id'],
                               {'terminated_at': now})
            # FIXME(ja): where should network deallocate occur?
            address = db.instance_get_floating_address(context,
                                                       instance_ref['id'])
@@ -776,9 +863,9 @@ class CloudController(object):
                # NOTE(vish): Currently, nothing needs to be done on the
                #             network node until release. If this changes,
                #             we will need to cast here.
                db.fixed_ip_deallocate(context, address)
                self.network_manager.deallocate_fixed_ip(context, address)

            host = db.instance_get_host(context, instance_ref['id'])
            host = instance_ref['host']
            if host:
                rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                         {"method": "terminate_instance",
@@ -793,7 +880,7 @@ class CloudController(object):
        """instance_id is a list of instance ids"""
        for id_str in instance_id:
            instance_ref = db.instance_get_by_str(context, id_str)
            host = db.instance_get_host(context, instance_ref['id'])
            host = instance_ref['host']
            rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                     {"method": "reboot_instance",
                      "args": {"context": None,
@@ -804,11 +891,15 @@ class CloudController(object):
    def delete_volume(self, context, volume_id, **kwargs):
        # TODO: return error if not authorized
        volume_ref = db.volume_get_by_str(context, volume_id)
        host = db.volume_get_host(context, volume_ref['id'])
        rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
        if volume_ref['status'] != "available":
            raise exception.ApiError("Volume status must be available")
        now = datetime.datetime.utcnow()
        db.volume_update(context, volume_ref['id'], {'terminated_at': now})
        host = volume_ref['host']
        rpc.cast(db.queue_get_for(context, FLAGS.volume_topic, host),
                 {"method": "delete_volume",
                  "args": {"context": None,
                           "volume_id": volume_id}})
                           "volume_id": volume_ref['id']}})
        return defer.succeed(True)

    @rbac.allow('all')
@@ -167,10 +167,14 @@ def DECLARE(name, module_string, flag_values=FLAGS):
# Define any app-specific flags in their own files, docs at:
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39

DEFINE_list('region_list',
            [],
            'list of region=url pairs separated by commas')
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on')
DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')

@@ -213,6 +217,8 @@ DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
              'Manager for network')
DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
              'Manager for volume')
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
              'Manager for scheduler')

DEFINE_string('host', socket.gethostname(),
              'name of this node')
@@ -30,7 +30,7 @@ from twisted.internet import protocol
from twisted.internet import reactor

from nova import flags
from nova.utils import ProcessExecutionError
from nova.exception import ProcessExecutionError

FLAGS = flags.FLAGS
flags.DEFINE_integer('process_pool_size', 4,
@@ -127,7 +127,7 @@ def get_process_output(executable, args=None, env=None, path=None,
    deferred = defer.Deferred()
    cmd = executable
    if args:
        cmd = cmd + " " + ' '.join(args)
        cmd = " ".join([cmd] + args)
    logging.debug("Running cmd: %s", cmd)
    process_handler = BackRelayWithInput(
        deferred,
@@ -141,8 +141,8 @@ def get_process_output(executable, args=None, env=None, path=None,
    executable = str(executable)
    if not args is None:
        args = [str(x) for x in args]
    process_reactor.spawnProcess( process_handler, executable,
                                  (executable,)+tuple(args), env, path)
    process_reactor.spawnProcess(process_handler, executable,
                                 (executable,)+tuple(args), env, path)
    return deferred

nova/scheduler/__init__.py (new file, 25 lines)
@@ -0,0 +1,25 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
:mod:`nova.scheduler` -- Scheduler Nodes
=====================================================

.. automodule:: nova.scheduler
    :platform: Unix
    :synopsis: Module that picks a compute node to run a VM instance.
.. moduleauthor:: Chris Behrens <cbehrens@codestud.com>
"""
nova/scheduler/simple.py (new file, 90 lines)
@@ -0,0 +1,90 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 Openstack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Simple Scheduler
"""

import datetime

from nova import db
from nova import flags
from nova.scheduler import driver
from nova.scheduler import chance

FLAGS = flags.FLAGS
flags.DEFINE_integer("max_cores", 16,
                     "maximum number of instance cores to allow per host")
flags.DEFINE_integer("max_gigabytes", 10000,
                     "maximum number of volume gigabytes to allow per host")
flags.DEFINE_integer("max_networks", 1000,
                     "maximum number of networks to allow per host")

class SimpleScheduler(chance.ChanceScheduler):
    """Implements Naive Scheduler that tries to find least loaded host."""

    def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        instance_ref = db.instance_get(context, instance_id)
        results = db.service_get_all_compute_sorted(context)
        for result in results:
            (service, instance_cores) = result
            if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
                raise driver.NoValidHost("All hosts have too many cores")
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.instance_update(context,
                                   instance_id,
                                   {'host': service['host'],
                                    'scheduled_at': now})
                return service['host']
        raise driver.NoValidHost("No hosts found")

    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
        results = db.service_get_all_volume_sorted(context)
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                raise driver.NoValidHost("All hosts have too many gigabytes")
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.volume_update(context,
                                 volume_id,
                                 {'host': service['host'],
                                  'scheduled_at': now})
                return service['host']
        raise driver.NoValidHost("No hosts found")

    def schedule_set_network_host(self, context, *_args, **_kwargs):
        """Picks a host that is up and has the fewest networks."""

        results = db.service_get_all_network_sorted(context)
        for result in results:
            (service, instance_count) = result
            if instance_count >= FLAGS.max_networks:
                raise driver.NoValidHost("All hosts have too many networks")
            if self.service_is_up(service):
                return service['host']
        raise driver.NoValidHost("No hosts found")
@@ -41,8 +41,8 @@ FLAGS = flags.FLAGS
# it's pretty damn circuitous so apologies if you have to fix
# a bug in it
# NOTE(jaypipes) The pylint disables here are for R0913 (too many args) which
# isn't controllable since boto's HTTPRequest needs that many
# args, and for the version-differentiated import of tornado's
#                isn't controllable since boto's HTTPRequest needs that many
#                args, and for the version-differentiated import of tornado's
#                httputil.
# NOTE(jaypipes): The disable-msg=E1101 and E1103 below is because pylint is
#                 unable to introspect the deferred's return value properly
@@ -224,7 +224,8 @@ class ApiEc2TestCase(test.BaseTestCase):
                        for x in range(random.randint(4, 8)))
        user = self.manager.create_user('fake', 'fake', 'fake')
        project = self.manager.create_project('fake', 'fake', 'fake')
        self.manager.generate_key_pair(user.id, keyname)
        # NOTE(vish): create depends on pool, so call helper directly
        cloud._gen_key(None, user.id, keyname)

        rv = self.ec2.get_all_key_pairs()
        results = [k for k in rv if k.name == keyname]
@@ -17,8 +17,6 @@
# under the License.

import logging
from M2Crypto import BIO
from M2Crypto import RSA
from M2Crypto import X509
import unittest

@@ -65,35 +63,6 @@ class AuthTestCase(test.BaseTestCase):
                         'export S3_URL="http://127.0.0.1:3333/"\n' +
                         'export EC2_USER_ID="test1"\n')

    def test_006_test_key_storage(self):
        user = self.manager.get_user('test1')
        user.create_key_pair('public', 'key', 'fingerprint')
        key = user.get_key_pair('public')
        self.assertEqual('key', key.public_key)
        self.assertEqual('fingerprint', key.fingerprint)

    def test_007_test_key_generation(self):
        user = self.manager.get_user('test1')
        private_key, fingerprint = user.generate_key_pair('public2')
        key = RSA.load_key_string(private_key, callback=lambda: None)
        bio = BIO.MemoryBuffer()
        public_key = user.get_key_pair('public2').public_key
        key.save_pub_key_bio(bio)
        converted = crypto.ssl_pub_to_ssh_pub(bio.read())
        # assert key fields are equal
        self.assertEqual(public_key.split(" ")[1].strip(),
                         converted.split(" ")[1].strip())

    def test_008_can_list_key_pairs(self):
        keys = self.manager.get_user('test1').get_key_pairs()
        self.assertTrue(filter(lambda k: k.name == 'public', keys))
        self.assertTrue(filter(lambda k: k.name == 'public2', keys))

    def test_009_can_delete_key_pair(self):
        self.manager.get_user('test1').delete_key_pair('public')
        keys = self.manager.get_user('test1').get_key_pairs()
        self.assertFalse(filter(lambda k: k.name == 'public', keys))

    def test_010_can_list_users(self):
        users = self.manager.get_users()
        logging.warn(users)
@@ -204,6 +173,12 @@ class AuthTestCase(test.BaseTestCase):
        self.assert_(len(self.manager.get_projects()) > 1)
        self.assertEqual(len(self.manager.get_projects('test2')), 1)

    def test_220_can_modify_project(self):
        self.manager.modify_project('testproj', 'test2', 'new description')
        project = self.manager.get_project('testproj')
        self.assertEqual(project.project_manager_id, 'test2')
        self.assertEqual(project.description, 'new description')

    def test_299_can_delete_project(self):
        self.manager.delete_project('testproj')
        self.assertFalse(filter(lambda p: p.name == 'testproj', self.manager.get_projects()))
@@ -17,18 +17,24 @@
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
from M2Crypto import BIO
|
||||
from M2Crypto import RSA
|
||||
import StringIO
|
||||
import time
|
||||
|
||||
from tornado import ioloop
|
||||
from twisted.internet import defer
|
||||
import unittest
|
||||
from xml.etree import ElementTree
|
||||
|
||||
from nova import crypto
|
||||
from nova import db
|
||||
from nova import flags
|
||||
from nova import rpc
|
||||
from nova import test
|
||||
from nova import utils
|
||||
from nova.auth import manager
|
||||
from nova.compute import power_state
|
||||
from nova.endpoint import api
|
||||
from nova.endpoint import cloud
|
||||
|
||||
@@ -54,16 +60,21 @@ class CloudTestCase(test.BaseTestCase):
|
||||
proxy=self.compute)
|
||||
self.injected.append(self.compute_consumer.attach_to_tornado(self.ioloop))
|
||||
|
||||
try:
|
||||
manager.AuthManager().create_user('admin', 'admin', 'admin')
|
||||
except: pass
|
||||
admin = manager.AuthManager().get_user('admin')
|
||||
project = manager.AuthManager().create_project('proj', 'admin', 'proj')
|
||||
self.context = api.APIRequestContext(handler=None,project=project,user=admin)
|
||||
self.manager = manager.AuthManager()
|
||||
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
|
||||
self.project = self.manager.create_project('proj', 'admin', 'proj')
|
||||
self.context = api.APIRequestContext(handler=None,
|
||||
user=self.user,
|
||||
project=self.project)
|
||||
|
||||
def tearDown(self):
|
||||
manager.AuthManager().delete_project('proj')
|
||||
manager.AuthManager().delete_user('admin')
|
||||
self.manager.delete_project(self.project)
|
||||
self.manager.delete_user(self.user)
|
||||
super(CloudTestCase, self).setUp()
|
||||
|
||||
def _create_key(self, name):
|
||||
# NOTE(vish): create depends on pool, so just call helper directly
|
||||
return cloud._gen_key(self.context, self.context.user.id, name)
|
||||
|
||||
def test_console_output(self):
|
||||
if FLAGS.connection_type == 'fake':
|
||||
@@ -76,6 +87,33 @@ class CloudTestCase(test.BaseTestCase):
|
||||
self.assert_(output)
|
||||
rv = yield self.compute.terminate_instance(instance_id)
|
||||
|
||||
|
||||
def test_key_generation(self):
|
||||
result = self._create_key('test')
|
||||
private_key = result['private_key']
|
||||
key = RSA.load_key_string(private_key, callback=lambda: None)
|
||||
bio = BIO.MemoryBuffer()
|
||||
public_key = db.key_pair_get(self.context,
|
||||
self.context.user.id,
|
||||
'test')['public_key']
|
||||
key.save_pub_key_bio(bio)
|
||||
converted = crypto.ssl_pub_to_ssh_pub(bio.read())
|
||||
# assert key fields are equal
|
||||
self.assertEqual(public_key.split(" ")[1].strip(),
|
||||
converted.split(" ")[1].strip())
|
||||
|
||||
    def test_describe_key_pairs(self):
        self._create_key('test1')
        self._create_key('test2')
        result = self.cloud.describe_key_pairs(self.context)
        keys = result["keypairsSet"]
        self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
        self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))

    def test_delete_key_pair(self):
        self._create_key('test')
        self.cloud.delete_key_pair(self.context, 'test')

    def test_run_instances(self):
        if FLAGS.connection_type == 'fake':
            logging.debug("Can't test instances without a real virtual env.")
@@ -94,7 +132,7 @@ class CloudTestCase(test.BaseTestCase):
            rv = yield defer.succeed(time.sleep(1))
            info = self.cloud._get_instance(instance['instance_id'])
            logging.debug(info['state'])
            if info['state'] == node.Instance.RUNNING:
            if info['state'] == power_state.RUNNING:
                break
        self.assert_(rv)

@@ -18,6 +18,8 @@
"""
Tests For Compute
"""

import datetime
import logging

from twisted.internet import defer
@@ -48,6 +50,7 @@ class ComputeTestCase(test.TrialTestCase):
    def tearDown(self):  # pylint: disable-msg=C0103
        self.manager.delete_user(self.user)
        self.manager.delete_project(self.project)
        super(ComputeTestCase, self).tearDown()

    def _create_instance(self):
        """Create a test instance"""
@@ -60,7 +63,7 @@ class ComputeTestCase(test.TrialTestCase):
        inst['instance_type'] = 'm1.tiny'
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        return db.instance_create(self.context, inst)
        return db.instance_create(self.context, inst)['id']

    @defer.inlineCallbacks
    def test_run_terminate(self):
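[Reviewer note, not part of the diff: this hunk shows the call-site pattern repeated across the commit. A sketch of the convention it moves to, assuming the db layer now returns a dict-like row:]

    instance_ref = db.instance_create(context, inst)  # a row, not a bare id
    instance_id = instance_ref['id']                  # tests keep only the id
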
@@ -79,6 +82,24 @@ class ComputeTestCase(test.TrialTestCase):
        logging.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)

    @defer.inlineCallbacks
    def test_run_terminate_timestamps(self):
        """Make sure timestamps are set for launched and destroyed"""
        instance_id = self._create_instance()
        instance_ref = db.instance_get(self.context, instance_id)
        self.assertEqual(instance_ref['launched_at'], None)
        self.assertEqual(instance_ref['deleted_at'], None)
        launch = datetime.datetime.utcnow()
        yield self.compute.run_instance(self.context, instance_id)
        instance_ref = db.instance_get(self.context, instance_id)
        self.assert_(instance_ref['launched_at'] > launch)
        self.assertEqual(instance_ref['deleted_at'], None)
        terminate = datetime.datetime.utcnow()
        yield self.compute.terminate_instance(self.context, instance_id)
        instance_ref = db.instance_get({'deleted': True}, instance_id)
        self.assert_(instance_ref['launched_at'] < terminate)
        self.assert_(instance_ref['deleted_at'] > terminate)

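[Reviewer note, not part of the diff: the four assertions above reduce to a single ordering invariant on the row's timestamp columns:]

    # launch < launched_at < terminate < deleted_at
    # (both columns start as None; run_instance stamps launched_at and
    #  terminate_instance stamps deleted_at)
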
    @defer.inlineCallbacks
    def test_reboot(self):
        """Ensure instance can be rebooted"""

@@ -28,6 +28,7 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.endpoint import api

FLAGS = flags.FLAGS

@@ -48,7 +49,7 @@ class NetworkTestCase(test.TrialTestCase):
        self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
        self.projects = []
        self.network = utils.import_object(FLAGS.network_manager)
        self.context = None
        self.context = api.APIRequestContext(None, project=None, user=self.user)
        for i in range(5):
            name = 'project%s' % i
            self.projects.append(self.manager.create_project(name,
@@ -56,12 +57,12 @@ class NetworkTestCase(test.TrialTestCase):
                                                             name))
            # create the necessary network data for the project
            self.network.set_network_host(self.context, self.projects[i].id)
        instance_id = db.instance_create(None,
        instance_ref = db.instance_create(None,
                                          {'mac_address': utils.generate_mac()})
        self.instance_id = instance_id
        instance_id = db.instance_create(None,
        self.instance_id = instance_ref['id']
        instance_ref = db.instance_create(None,
                                          {'mac_address': utils.generate_mac()})
        self.instance2_id = instance_id
        self.instance2_id = instance_ref['id']

    def tearDown(self):  # pylint: disable-msg=C0103
        super(NetworkTestCase, self).tearDown()
@@ -75,12 +76,10 @@ class NetworkTestCase(test.TrialTestCase):

    def _create_address(self, project_num, instance_id=None):
        """Create an address in given project num"""
        net = db.project_get_network(None, self.projects[project_num].id)
        address = db.fixed_ip_allocate(None, net['id'])
        if instance_id is None:
            instance_id = self.instance_id
        db.fixed_ip_instance_associate(None, address, instance_id)
        return address
        self.context.project = self.projects[project_num]
        return self.network.allocate_fixed_ip(self.context, instance_id)

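[Reviewer note, not part of the diff: the helper switches from driving the db layer directly to going through the network manager. Sketch of the new flow, assuming allocate_fixed_ip reads the project off the context:]

    context.project = projects[project_num]            # scope the request
    address = network.allocate_fixed_ip(context, instance_id)
    # allocation and instance association now happen inside the manager
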
    def test_public_network_association(self):
        """Makes sure that we can allocate a public ip"""
@@ -103,14 +102,14 @@ class NetworkTestCase(test.TrialTestCase):
        address = db.instance_get_floating_address(None, self.instance_id)
        self.assertEqual(address, None)
        self.network.deallocate_floating_ip(self.context, float_addr)
        db.fixed_ip_deallocate(None, fix_addr)
        self.network.deallocate_fixed_ip(self.context, fix_addr)

    def test_allocate_deallocate_fixed_ip(self):
        """Makes sure that we can allocate and deallocate a fixed ip"""
        address = self._create_address(0)
        self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
        lease_ip(address)
        db.fixed_ip_deallocate(None, address)
        self.network.deallocate_fixed_ip(self.context, address)

        # Doesn't go away until it's dhcp released
        self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
@@ -131,14 +130,14 @@ class NetworkTestCase(test.TrialTestCase):
        lease_ip(address)
        lease_ip(address2)

        db.fixed_ip_deallocate(None, address)
        self.network.deallocate_fixed_ip(self.context, address)
        release_ip(address)
        self.assertFalse(is_allocated_in_project(address, self.projects[0].id))

        # First address release shouldn't affect the second
        self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))

        db.fixed_ip_deallocate(None, address2)
        self.network.deallocate_fixed_ip(self.context, address2)
        release_ip(address2)
        self.assertFalse(is_allocated_in_project(address2,
                                                 self.projects[1].id))
@@ -147,10 +146,23 @@ class NetworkTestCase(test.TrialTestCase):
        """Makes sure that private ips don't overlap"""
        first = self._create_address(0)
        lease_ip(first)
        instance_ids = []
        for i in range(1, 5):
            address = self._create_address(i)
            address2 = self._create_address(i)
            address3 = self._create_address(i)
            mac = utils.generate_mac()
            instance_ref = db.instance_create(None,
                                              {'mac_address': mac})
            instance_ids.append(instance_ref['id'])
            address = self._create_address(i, instance_ref['id'])
            mac = utils.generate_mac()
            instance_ref = db.instance_create(None,
                                              {'mac_address': mac})
            instance_ids.append(instance_ref['id'])
            address2 = self._create_address(i, instance_ref['id'])
            mac = utils.generate_mac()
            instance_ref = db.instance_create(None,
                                              {'mac_address': mac})
            instance_ids.append(instance_ref['id'])
            address3 = self._create_address(i, instance_ref['id'])
            lease_ip(address)
            lease_ip(address2)
            lease_ip(address3)
@@ -160,14 +172,16 @@ class NetworkTestCase(test.TrialTestCase):
                                                     self.projects[0].id))
            self.assertFalse(is_allocated_in_project(address3,
                                                     self.projects[0].id))
            db.fixed_ip_deallocate(None, address)
            db.fixed_ip_deallocate(None, address2)
            db.fixed_ip_deallocate(None, address3)
            self.network.deallocate_fixed_ip(self.context, address)
            self.network.deallocate_fixed_ip(self.context, address2)
            self.network.deallocate_fixed_ip(self.context, address3)
            release_ip(address)
            release_ip(address2)
            release_ip(address3)
        for instance_id in instance_ids:
            db.instance_destroy(None, instance_id)
        release_ip(first)
        db.fixed_ip_deallocate(None, first)
        self.network.deallocate_fixed_ip(self.context, first)

    def test_vpn_ip_and_port_looks_valid(self):
        """Ensure the vpn ip and port are reasonable"""
@@ -194,12 +208,12 @@ class NetworkTestCase(test.TrialTestCase):
        """Makes sure that ip addresses that are deallocated get reused"""
        address = self._create_address(0)
        lease_ip(address)
        db.fixed_ip_deallocate(None, address)
        self.network.deallocate_fixed_ip(self.context, address)
        release_ip(address)

        address2 = self._create_address(0)
        self.assertEqual(address, address2)
        db.fixed_ip_deallocate(None, address2)
        self.network.deallocate_fixed_ip(self.context, address2)

    def test_available_ips(self):
        """Make sure the number of available ips for the network is correct
@@ -226,21 +240,27 @@ class NetworkTestCase(test.TrialTestCase):
        num_available_ips = db.network_count_available_ips(None,
                                                           network['id'])
        addresses = []
        instance_ids = []
        for i in range(num_available_ips):
            address = self._create_address(0)
            mac = utils.generate_mac()
            instance_ref = db.instance_create(None,
                                              {'mac_address': mac})
            instance_ids.append(instance_ref['id'])
            address = self._create_address(0, instance_ref['id'])
            addresses.append(address)
            lease_ip(address)

        self.assertEqual(db.network_count_available_ips(None,
                                                        network['id']), 0)
        self.assertRaises(db.NoMoreAddresses,
                          db.fixed_ip_allocate,
                          None,
                          network['id'])
                          self.network.allocate_fixed_ip,
                          self.context,
                          'foo')

        for i in range(len(addresses)):
            db.fixed_ip_deallocate(None, addresses[i])
        for i in range(num_available_ips):
            self.network.deallocate_fixed_ip(self.context, addresses[i])
            release_ip(addresses[i])
            db.instance_destroy(None, instance_ids[i])
        self.assertEqual(db.network_count_available_ips(None,
                                                        network['id']),
                         num_available_ips)
@@ -263,7 +283,10 @@ def binpath(script):
def lease_ip(private_ip):
    """Run add command on dhcpbridge"""
    network_ref = db.fixed_ip_get_network(None, private_ip)
    cmd = "%s add fake %s fake" % (binpath('nova-dhcpbridge'), private_ip)
    instance_ref = db.fixed_ip_get_instance(None, private_ip)
    cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
                                 instance_ref['mac_address'],
                                 private_ip)
    env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
           'TESTING': '1',
           'FLAGFILE': FLAGS.dhcpbridge_flagfile}
@@ -274,7 +297,10 @@ def lease_ip(private_ip):
def release_ip(private_ip):
    """Run del command on dhcpbridge"""
    network_ref = db.fixed_ip_get_network(None, private_ip)
    cmd = "%s del fake %s fake" % (binpath('nova-dhcpbridge'), private_ip)
    instance_ref = db.fixed_ip_get_instance(None, private_ip)
    cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
                                 instance_ref['mac_address'],
                                 private_ip)
    env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
           'TESTING': '1',
           'FLAGFILE': FLAGS.dhcpbridge_flagfile}

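[Reviewer note, not part of the diff: the helpers now pass the instance's real mac instead of the literal "fake", matching the new lease/release signatures elsewhere in this commit. Illustration with assumed values for mac and ip:]

    # binpath('nova-dhcpbridge') add <mac> <ip> <hostname>
    # e.g.  .../bin/nova-dhcpbridge add 02:16:3e:00:00:01 10.0.0.3 fake
    # (only the hostname argument remains a placeholder)
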
nova/tests/quota_unittest.py (new file, 155 lines)
@@ -0,0 +1,155 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import test
from nova import utils
from nova.auth import manager
from nova.endpoint import cloud
from nova.endpoint import api


FLAGS = flags.FLAGS


class QuotaTestCase(test.TrialTestCase):
    def setUp(self):  # pylint: disable-msg=C0103
        logging.getLogger().setLevel(logging.DEBUG)
        super(QuotaTestCase, self).setUp()
        self.flags(connection_type='fake',
                   quota_instances=2,
                   quota_cores=4,
                   quota_volumes=2,
                   quota_gigabytes=20,
                   quota_floating_ips=1)

        self.cloud = cloud.CloudController()
        self.manager = manager.AuthManager()
        self.user = self.manager.create_user('admin', 'admin', 'admin', True)
        self.project = self.manager.create_project('admin', 'admin', 'admin')
        self.network = utils.import_object(FLAGS.network_manager)
        self.context = api.APIRequestContext(handler=None,
                                             project=self.project,
                                             user=self.user)

    def tearDown(self):  # pylint: disable-msg=C0103
        manager.AuthManager().delete_project(self.project)
        manager.AuthManager().delete_user(self.user)
        super(QuotaTestCase, self).tearDown()

    def _create_instance(self, cores=2):
        """Create a test instance"""
        inst = {}
        inst['image_id'] = 'ami-test'
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user.id
        inst['project_id'] = self.project.id
        inst['instance_type'] = 'm1.large'
        inst['vcpus'] = cores
        inst['mac_address'] = utils.generate_mac()
        return db.instance_create(self.context, inst)['id']

    def _create_volume(self, size=10):
        """Create a test volume"""
        vol = {}
        vol['user_id'] = self.user.id
        vol['project_id'] = self.project.id
        vol['size'] = size
        return db.volume_create(self.context, vol)['id']

    def test_quota_overrides(self):
        """Make sure overriding a project's quotas works"""
        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
        self.assertEqual(num_instances, 2)
        db.quota_create(self.context, {'project_id': self.project.id,
                                       'instances': 10})
        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
        self.assertEqual(num_instances, 4)
        db.quota_update(self.context, self.project.id, {'cores': 100})
        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
        self.assertEqual(num_instances, 10)
        db.quota_destroy(self.context, self.project.id)

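[Reviewer note, not part of the diff: where 2 -> 4 -> 10 comes from, assuming allowed_instances caps the request by both the instance and core quotas and m1.small counts as one core:]

    # request 100, quota_instances=2, quota_cores=4   -> min(100, 2, 4)   = 2
    # override instances=10, cores still 4            -> min(100, 10, 4)  = 4
    # override cores=100                              -> min(100, 10, 100) = 10
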
    def test_too_many_instances(self):
        instance_ids = []
        for i in range(FLAGS.quota_instances):
            instance_id = self._create_instance()
            instance_ids.append(instance_id)
        self.assertFailure(self.cloud.run_instances(self.context,
                                                    min_count=1,
                                                    max_count=1,
                                                    instance_type='m1.small'),
                           cloud.QuotaError)
        for instance_id in instance_ids:
            db.instance_destroy(self.context, instance_id)

    def test_too_many_cores(self):
        instance_ids = []
        instance_id = self._create_instance(cores=4)
        instance_ids.append(instance_id)
        self.assertFailure(self.cloud.run_instances(self.context,
                                                    min_count=1,
                                                    max_count=1,
                                                    instance_type='m1.small'),
                           cloud.QuotaError)
        for instance_id in instance_ids:
            db.instance_destroy(self.context, instance_id)

    def test_too_many_volumes(self):
        volume_ids = []
        for i in range(FLAGS.quota_volumes):
            volume_id = self._create_volume()
            volume_ids.append(volume_id)
        self.assertRaises(cloud.QuotaError,
                          self.cloud.create_volume,
                          self.context,
                          size=10)
        for volume_id in volume_ids:
            db.volume_destroy(self.context, volume_id)

    def test_too_many_gigabytes(self):
        volume_ids = []
        volume_id = self._create_volume(size=20)
        volume_ids.append(volume_id)
        self.assertRaises(cloud.QuotaError,
                          self.cloud.create_volume,
                          self.context,
                          size=10)
        for volume_id in volume_ids:
            db.volume_destroy(self.context, volume_id)

    def test_too_many_addresses(self):
        address = '192.168.0.100'
        try:
            db.floating_ip_get_by_address(None, address)
        except exception.NotFound:
            db.floating_ip_create(None, {'address': address,
                                         'host': FLAGS.host})
        float_addr = self.network.allocate_floating_ip(self.context,
                                                       self.project.id)
        # NOTE(vish): This assert never fails. When cloud attempts to
        #             make an rpc.call, the test just finishes with OK. It
        #             appears to be something in the magic inline callbacks
        #             that is breaking.
        self.assertFailure(self.cloud.allocate_address(self.context),
                           cloud.QuotaError)
nova/tests/scheduler_unittest.py (new file, 231 lines)
@@ -0,0 +1,231 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""

from nova import db
from nova import flags
from nova import service
from nova import test
from nova import rpc
from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import manager
from nova.scheduler import driver


FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')

class TestDriver(driver.Scheduler):
    """Scheduler Driver for Tests"""
    def schedule(self, context, topic, *args, **kwargs):
        return 'fallback_host'

    def schedule_named_method(self, context, topic, num):
        return 'named_host'

class SchedulerTestCase(test.TrialTestCase):
    """Test case for scheduler"""
    def setUp(self):  # pylint: disable-msg=C0103
        super(SchedulerTestCase, self).setUp()
        self.flags(scheduler_driver='nova.tests.scheduler_unittest.TestDriver')

    def test_fallback(self):
        scheduler = manager.SchedulerManager()
        self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
        rpc.cast('topic.fallback_host',
                 {'method': 'noexist',
                  'args': {'context': None,
                           'num': 7}})
        self.mox.ReplayAll()
        scheduler.noexist(None, 'topic', num=7)

    def test_named_method(self):
        scheduler = manager.SchedulerManager()
        self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
        rpc.cast('topic.named_host',
                 {'method': 'named_method',
                  'args': {'context': None,
                           'num': 7}})
        self.mox.ReplayAll()
        scheduler.named_method(None, 'topic', num=7)


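[Reviewer note, not part of the diff: what test_fallback exercises, sketched under the assumption that SchedulerManager proxies unknown method names to the driver's schedule() and then casts to "<topic>.<host>":]

    host = driver.schedule(context, topic)      # TestDriver -> 'fallback_host'
    rpc.cast('%s.%s' % (topic, host),
             {'method': 'noexist',
              'args': {'context': context, 'num': 7}})
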
class SimpleDriverTestCase(test.TrialTestCase):
    """Test case for simple driver"""
    def setUp(self):  # pylint: disable-msg=C0103
        super(SimpleDriverTestCase, self).setUp()
        self.flags(connection_type='fake',
                   max_cores=4,
                   max_gigabytes=4,
                   volume_driver='nova.volume.driver.FakeAOEDriver',
                   scheduler_driver='nova.scheduler.simple.SimpleScheduler')
        self.scheduler = manager.SchedulerManager()
        self.context = None
        self.manager = auth_manager.AuthManager()
        self.user = self.manager.create_user('fake', 'fake', 'fake')
        self.project = self.manager.create_project('fake', 'fake', 'fake')
        self.context = None

    def tearDown(self):  # pylint: disable-msg=C0103
        self.manager.delete_user(self.user)
        self.manager.delete_project(self.project)

    def _create_instance(self):
        """Create a test instance"""
        inst = {}
        inst['image_id'] = 'ami-test'
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user.id
        inst['project_id'] = self.project.id
        inst['instance_type'] = 'm1.tiny'
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        inst['vcpus'] = 1
        return db.instance_create(self.context, inst)['id']

    def _create_volume(self):
        """Create a test volume"""
        vol = {}
        vol['image_id'] = 'ami-test'
        vol['reservation_id'] = 'r-fakeres'
        vol['size'] = 1
        return db.volume_create(self.context, vol)['id']

    def test_hosts_are_up(self):
        """Ensures driver can find the hosts that are up"""
        # NOTE(vish): constructing service without create method
        #             because we are going to use it without queue
        compute1 = service.Service('host1',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
        self.assertEqual(len(hosts), 2)
        compute1.kill()
        compute2.kill()

    def test_least_busy_host_gets_instance(self):
        """Ensures the host with fewer cores gets the next one"""
        compute1 = service.Service('host1',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance()
        host = self.scheduler.driver.schedule_run_instance(self.context,
                                                           instance_id2)
        self.assertEqual(host, 'host2')
        compute1.terminate_instance(self.context, instance_id1)
        db.instance_destroy(self.context, instance_id2)
        compute1.kill()
        compute2.kill()

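[Reviewer note, not part of the diff: why host2 wins above, assuming SimpleScheduler picks the host with the fewest cores in use:]

    cores_in_use = {'host1': 1, 'host2': 0}          # one m1.tiny already on host1
    pick = min(cores_in_use, key=cores_in_use.get)   # -> 'host2'
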
    def test_too_many_cores(self):
        """Ensures we don't go over max cores"""
        compute1 = service.Service('host1',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        instance_ids1 = []
        instance_ids2 = []
        for index in xrange(FLAGS.max_cores):
            instance_id = self._create_instance()
            compute1.run_instance(self.context, instance_id)
            instance_ids1.append(instance_id)
            instance_id = self._create_instance()
            compute2.run_instance(self.context, instance_id)
            instance_ids2.append(instance_id)
        instance_id = self._create_instance()
        self.assertRaises(driver.NoValidHost,
                          self.scheduler.driver.schedule_run_instance,
                          self.context,
                          instance_id)
        for instance_id in instance_ids1:
            compute1.terminate_instance(self.context, instance_id)
        for instance_id in instance_ids2:
            compute2.terminate_instance(self.context, instance_id)
        compute1.kill()
        compute2.kill()

    def test_least_busy_host_gets_volume(self):
        """Ensures the host with fewer gigabytes gets the next one"""
        volume1 = service.Service('host1',
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
        volume2 = service.Service('host2',
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
        volume_id1 = self._create_volume()
        volume1.create_volume(self.context, volume_id1)
        volume_id2 = self._create_volume()
        host = self.scheduler.driver.schedule_create_volume(self.context,
                                                            volume_id2)
        self.assertEqual(host, 'host2')
        volume1.delete_volume(self.context, volume_id1)
        db.volume_destroy(self.context, volume_id2)
        volume1.kill()
        volume2.kill()

    def test_too_many_gigabytes(self):
        """Ensures we don't go over max gigabytes"""
        volume1 = service.Service('host1',
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
        volume2 = service.Service('host2',
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
        volume_ids1 = []
        volume_ids2 = []
        for index in xrange(FLAGS.max_gigabytes):
            volume_id = self._create_volume()
            volume1.create_volume(self.context, volume_id)
            volume_ids1.append(volume_id)
            volume_id = self._create_volume()
            volume2.create_volume(self.context, volume_id)
            volume_ids2.append(volume_id)
        volume_id = self._create_volume()
        self.assertRaises(driver.NoValidHost,
                          self.scheduler.driver.schedule_create_volume,
                          self.context,
                          volume_id)
        for volume_id in volume_ids1:
            volume1.delete_volume(self.context, volume_id)
        for volume_id in volume_ids2:
            volume2.delete_volume(self.context, volume_id)
        volume1.kill()
        volume2.kill()
@@ -87,7 +87,7 @@ class ServiceTestCase(test.BaseTestCase):
                                            host,
                                            binary).AndRaise(exception.NotFound())
        service.db.service_create(None,
                                  service_create).AndReturn(service_ref['id'])
                                  service_create).AndReturn(service_ref)
        self.mox.ReplayAll()

        app = service.Service.create(host=host, binary=binary)
@@ -131,7 +131,7 @@ class ServiceTestCase(test.BaseTestCase):
                                            host,
                                            binary).AndRaise(exception.NotFound())
        service.db.service_create(None,
                                  service_create).AndReturn(service_ref['id'])
                                  service_create).AndReturn(service_ref)
        service.db.service_get(None, service_ref['id']).AndReturn(service_ref)
        service.db.service_update(None, service_ref['id'],
                                  mox.ContainsKeyValue('report_count', 1))

@@ -108,7 +108,7 @@ class VolumeTestCase(test.TrialTestCase):
        inst['instance_type'] = 'm1.tiny'
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        instance_id = db.instance_create(self.context, inst)
        instance_id = db.instance_create(self.context, inst)['id']
        mountpoint = "/dev/sdf"
        volume_id = self._create_volume()
        yield self.volume.create_volume(self.context, volume_id)

@@ -58,7 +58,9 @@ from nova.tests.flags_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.quota_unittest import *
from nova.tests.rpc_unittest import *
from nova.tests.scheduler_unittest import *
from nova.tests.service_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.volume_unittest import *