Update migrate script version to 22.
Authors (1)
@@ -60,6 +60,7 @@ Mark Washenberger <mark.washenberger@rackspace.com>
Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Mike Scherbakov <mihgen@gmail.com>
Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
@@ -53,7 +53,6 @@
CLI interface for nova management.
"""


import datetime
import gettext
import glob
import json
@@ -78,6 +77,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova import quota
from nova import rpc
@@ -536,7 +536,7 @@ class FloatingIpCommands(object):
        for floating_ip in floating_ips:
            instance = None
            if floating_ip['fixed_ip']:
                instance = floating_ip['fixed_ip']['instance']['ec2_id']
                instance = floating_ip['fixed_ip']['instance']['hostname']
            print "%s\t%s\t%s" % (floating_ip['host'],
                                  floating_ip['address'],
                                  instance)
@@ -689,7 +689,7 @@ class ServiceCommands(object):
        """Show a list of all running services. Filter by host & service name.
        args: [host] [service]"""
        ctxt = context.get_admin_context()
        now = datetime.datetime.utcnow()
        now = utils.utcnow()
        services = db.service_get_all(ctxt)
        if host:
            services = [s for s in services if s['host'] == host]
@@ -936,7 +936,7 @@ class ImageCommands(object):
    """Methods for dealing with a cloud in an odd state"""

    def __init__(self, *args, **kwargs):
        self.image_service = utils.import_object(FLAGS.image_service)
        self.image_service = image.get_default_image_service()

    def _register(self, container_format, disk_format,
                  path, owner, name=None, is_public='T',
@@ -1081,24 +1081,35 @@ class ImageCommands(object):
        self._convert_images(machine_images)


class ConfigCommands(object):
    """Class for exposing the flags defined by flag_file(s)."""

    def __init__(self):
        pass

    def list(self):
        print FLAGS.FlagsIntoString()


CATEGORIES = [
    ('user', UserCommands),
    ('account', AccountCommands),
    ('project', ProjectCommands),
    ('role', RoleCommands),
    ('shell', ShellCommands),
    ('vpn', VpnCommands),
    ('fixed', FixedIpCommands),
    ('floating', FloatingIpCommands),
    ('network', NetworkCommands),
    ('vm', VmCommands),
    ('service', ServiceCommands),
    ('config', ConfigCommands),
    ('db', DbCommands),
    ('volume', VolumeCommands),
    ('fixed', FixedIpCommands),
    ('flavor', InstanceTypeCommands),
    ('floating', FloatingIpCommands),
    ('instance_type', InstanceTypeCommands),
    ('image', ImageCommands),
    ('flavor', InstanceTypeCommands),
    ('version', VersionCommands)]
    ('network', NetworkCommands),
    ('project', ProjectCommands),
    ('role', RoleCommands),
    ('service', ServiceCommands),
    ('shell', ShellCommands),
    ('user', UserCommands),
    ('version', VersionCommands),
    ('vm', VmCommands),
    ('volume', VolumeCommands),
    ('vpn', VpnCommands)]
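This table is consumed by lazy_match (declared just below), which resolves a possibly-abbreviated category name against these keys. A minimal sketch of the idea; this is illustrative, not necessarily the exact nova-manage implementation:

```python
def lazy_match(name, key_value_tuples):
    """Sketch: return every (key, value) whose key begins with name."""
    return [(k, v) for k, v in key_value_tuples if k.startswith(name)]

# e.g. lazy_match('vo', CATEGORIES) would yield [('volume', VolumeCommands)]
```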


def lazy_match(name, key_value_tuples):
@@ -24,6 +24,7 @@ other backends by creating another class that exposes the same
public methods.
"""

import functools
import sys

from nova import exception
@@ -68,6 +69,12 @@ flags.DEFINE_string('ldap_developer',
LOG = logging.getLogger("nova.ldapdriver")


if FLAGS.memcached_servers:
    import memcache
else:
    from nova import fakememcache as memcache
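When no memcached servers are configured, the import falls back to nova.fakememcache, presumably an in-process stand-in exposing the same get/set surface as memcache.Client (the auth tests later reach straight into its .cache dict). A minimal sketch of such a fake, under that assumption:

```python
class Client(object):
    """In-process stand-in mirroring the memcache.Client calls used here."""

    def __init__(self, addresses=None, debug=0):
        self.cache = {}

    def get(self, key):
        return self.cache.get(key)

    def set(self, key, value, time=0):
        self.cache[key] = value
        return True
```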


# TODO(vish): make an abstract base class with the same public methods
#             to define a set interface for AuthDrivers. I'm delaying
#             creating this now because I'm expecting an auth refactor
@@ -85,6 +92,7 @@ def _clean(attr):

def sanitize(fn):
    """Decorator to sanitize all args"""
    @functools.wraps(fn)
    def _wrapped(self, *args, **kwargs):
        args = [_clean(x) for x in args]
        kwargs = dict((k, _clean(v)) for (k, v) in kwargs)
@@ -103,29 +111,56 @@ class LdapDriver(object):
    isadmin_attribute = 'isNovaAdmin'
    project_attribute = 'owner'
    project_objectclass = 'groupOfNames'
    conn = None
    mc = None

    def __init__(self):
        """Imports the LDAP module"""
        self.ldap = __import__('ldap')
        self.conn = None
        if FLAGS.ldap_schema_version == 1:
            LdapDriver.project_pattern = '(objectclass=novaProject)'
            LdapDriver.isadmin_attribute = 'isAdmin'
            LdapDriver.project_attribute = 'projectManager'
            LdapDriver.project_objectclass = 'novaProject'
        self.__cache = None
        if LdapDriver.conn is None:
            LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
            LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
                                          FLAGS.ldap_password)
        if LdapDriver.mc is None:
            LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)

    def __enter__(self):
        """Creates the connection to LDAP"""
        self.conn = self.ldap.initialize(FLAGS.ldap_url)
        self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
        # TODO(yorik-sar): Should be per-request cache, not per-driver-request
        self.__cache = {}
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Destroys the connection to LDAP"""
        self.conn.unbind_s()
        self.__cache = None
        return False
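Because __enter__ installs a fresh __cache dict and __exit__ tears it down, callers are expected to use the driver as a context manager so cached LDAP lookups live exactly as long as one request. An illustrative usage, not taken verbatim from nova:

```python
# Each `with` block gets its own request-local cache, while the
# class-level LdapDriver.conn / LdapDriver.mc persist across requests.
with LdapDriver() as drv:
    user = drv.get_user('alice')   # first call queries LDAP
    user = drv.get_user('alice')   # repeat call hits the local __cache
```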

    def __local_cache(key_fmt):
        """Wrap function to cache its result in self.__cache.
        Works only with functions with one fixed argument.
        """
        def do_wrap(fn):
            @functools.wraps(fn)
            def inner(self, arg, **kwargs):
                cache_key = key_fmt % (arg,)
                try:
                    res = self.__cache[cache_key]
                    LOG.debug('Local cache hit for %s by key %s' %
                              (fn.__name__, cache_key))
                    return res
                except KeyError:
                    res = fn(self, arg, **kwargs)
                    self.__cache[cache_key] = res
                    return res
            return inner
        return do_wrap

    @sanitize
    @__local_cache('uid_user-%s')
    def get_user(self, uid):
        """Retrieve user by id"""
        attr = self.__get_ldap_user(uid)
@@ -134,15 +169,31 @@ class LdapDriver(object):
    @sanitize
    def get_user_from_access_key(self, access):
        """Retrieve user by access key"""
        cache_key = 'uak_dn_%s' % (access,)
        user_dn = self.mc.get(cache_key)
        if user_dn:
            user = self.__to_user(
                self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE))
            if user:
                if user['access'] == access:
                    return user
                else:
                    self.mc.set(cache_key, None)
        query = '(accessKey=%s)' % access
        dn = FLAGS.ldap_user_subtree
        return self.__to_user(self.__find_object(dn, query))
        user_obj = self.__find_object(dn, query)
        user = self.__to_user(user_obj)
        if user:
            self.mc.set(cache_key, user_obj['dn'][0])
        return user

    @sanitize
    @__local_cache('pid_project-%s')
    def get_project(self, pid):
        """Retrieve project by id"""
        dn = self.__project_to_dn(pid)
        attr = self.__find_object(dn, LdapDriver.project_pattern)
        dn = self.__project_to_dn(pid, search=False)
        attr = self.__find_object(dn, LdapDriver.project_pattern,
                                  scope=self.ldap.SCOPE_BASE)
        return self.__to_project(attr)

    @sanitize
@@ -395,6 +446,7 @@ class LdapDriver(object):
        """Check if project exists"""
        return self.get_project(project_id) is not None

    @__local_cache('uid_attrs-%s')
    def __get_ldap_user(self, uid):
        """Retrieve LDAP user entry by id"""
        dn = FLAGS.ldap_user_subtree
@@ -426,12 +478,20 @@ class LdapDriver(object):
        if scope is None:
            # One of the flags is 0!
            scope = self.ldap.SCOPE_SUBTREE
        if query is None:
            query = "(objectClass=*)"
        try:
            res = self.conn.search_s(dn, scope, query)
        except self.ldap.NO_SUCH_OBJECT:
            return []
        # Just return the attributes
        return [attributes for dn, attributes in res]
        # FIXME(yorik-sar): Whole driver should be refactored to
        #                   prevent this hack
        res1 = []
        for dn, attrs in res:
            attrs['dn'] = [dn]
            res1.append(attrs)
        return res1
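The hack exists so that each returned attribute dict also carries its own DN, which is what lets get_user_from_access_key above read user_obj['dn'][0]. Illustratively, with invented values:

```python
# python-ldap's search_s returns [(dn, attrs), ...]:
res = [('uid=alice,ou=Users,dc=example', {'accessKey': ['ak123']})]
# After the dn-injection loop, callers see plain attr dicts:
#   [{'accessKey': ['ak123'], 'dn': ['uid=alice,ou=Users,dc=example']}]
```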

    def __find_role_dns(self, tree):
        """Find dns of role objects in given tree"""
@@ -564,6 +624,7 @@ class LdapDriver(object):
            'description': attr.get('description', [None])[0],
            'member_ids': [self.__dn_to_uid(x) for x in member_dns]}

    @__local_cache('uid_dn-%s')
    def __uid_to_dn(self, uid, search=True):
        """Convert uid to dn"""
        # By default return a generated DN
@@ -576,6 +637,7 @@
            userdn = user[0]
        return userdn

    @__local_cache('pid_dn-%s')
    def __project_to_dn(self, pid, search=True):
        """Convert pid to dn"""
        # By default return a generated DN
@@ -603,16 +665,18 @@
        else:
            return None

    @__local_cache('dn_uid-%s')
    def __dn_to_uid(self, dn):
        """Convert user dn to uid"""
        query = '(objectclass=novaUser)'
        user = self.__find_object(dn, query)
        user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE)
        return user[FLAGS.ldap_user_id_attribute][0]


class FakeLdapDriver(LdapDriver):
    """Fake Ldap Auth driver"""

    def __init__(self):  # pylint: disable=W0231
        __import__('nova.auth.fakeldap')
        self.ldap = sys.modules['nova.auth.fakeldap']
    def __init__(self):
        import nova.auth.fakeldap
        sys.modules['ldap'] = nova.auth.fakeldap
        super(FakeLdapDriver, self).__init__()

@@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
LOG = logging.getLogger('nova.auth.manager')


if FLAGS.memcached_servers:
    import memcache
else:
    from nova import fakememcache as memcache


class AuthBase(object):
    """Base class for objects relating to auth

@@ -206,6 +212,7 @@ class AuthManager(object):
    """

    _instance = None
    mc = None

    def __new__(cls, *args, **kwargs):
        """Returns the AuthManager singleton"""
@@ -222,13 +229,8 @@ class AuthManager(object):
        self.network_manager = utils.import_object(FLAGS.network_manager)
        if driver or not getattr(self, 'driver', None):
            self.driver = utils.import_class(driver or FLAGS.auth_driver)

        if FLAGS.memcached_servers:
            import memcache
        else:
            from nova import fakememcache as memcache
        self.mc = memcache.Client(FLAGS.memcached_servers,
                                  debug=0)
        if AuthManager.mc is None:
            AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
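The replacement makes the client a class attribute, so the AuthManager singleton and any re-created instances share one memcache connection instead of re-connecting in every __init__. The pattern, sketched minimally with hypothetical names:

```python
class Manager(object):
    mc = None   # shared, class-level cache client

    def __init__(self, servers):
        # Only the first instantiation creates the connection;
        # later instances reuse Manager.mc untouched.
        if Manager.mc is None:
            Manager.mc = memcache.Client(servers, debug=0)
```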

    def authenticate(self, access, signature, params, verb='GET',
                     server_string='127.0.0.1:8773', path='/',

@@ -1,4 +1,6 @@
NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE}))
NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) ||
    NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}")
NOVA_KEY_DIR=${NOVARC%/*}
export EC2_ACCESS_KEY="%(access)s:%(project)s"
export EC2_SECRET_KEY="%(secret)s"
export EC2_URL="%(ec2)s"

@@ -296,6 +296,7 @@ DEFINE_bool('fake_network', False,
            'should we use fake network devices and addresses')
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')

nova/log.py (10)
@@ -35,6 +35,7 @@ import os
import sys
import traceback

import nova
from nova import flags
from nova import version

@@ -63,6 +64,7 @@ flags.DEFINE_list('default_log_levels',
                   'eventlet.wsgi.server=WARN'],
                  'list of logger=LEVEL pairs')
flags.DEFINE_bool('use_syslog', False, 'output to syslog')
flags.DEFINE_bool('publish_errors', False, 'publish error events')
flags.DEFINE_string('logfile', None, 'output to named file')


@@ -258,12 +260,20 @@ class NovaRootLogger(NovaLogger):
        else:
            self.removeHandler(self.filelog)
            self.addHandler(self.streamlog)
        if FLAGS.publish_errors:
            self.addHandler(PublishErrorsHandler(ERROR))
        if FLAGS.verbose:
            self.setLevel(DEBUG)
        else:
            self.setLevel(INFO)


class PublishErrorsHandler(logging.Handler):
    def emit(self, record):
        nova.notifier.api.notify('nova.error.publisher', 'error_notification',
                                 nova.notifier.api.ERROR, dict(error=record.msg))


def handle_exception(type, value, tb):
    extra = {}
    if FLAGS.verbose:

@@ -11,9 +11,8 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.import datetime
# under the License.

import datetime
import uuid

from nova import flags
@@ -64,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload):

    {'message_id': str(uuid.uuid4()),
     'publisher_id': 'compute.host1',
     'timestamp': datetime.datetime.utcnow(),
     'timestamp': utils.utcnow(),
     'priority': 'WARN',
     'event_type': 'compute.create_instance',
     'payload': {'instance_id': 12, ... }}
@@ -79,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload):
                event_type=event_type,
                priority=priority,
                payload=payload,
                timestamp=str(datetime.datetime.utcnow()))
                timestamp=str(utils.utcnow()))
    driver.notify(msg)

@@ -65,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection):
        if new or not hasattr(cls, '_instance'):
            params = dict(hostname=FLAGS.rabbit_host,
                          port=FLAGS.rabbit_port,
                          ssl=FLAGS.rabbit_use_ssl,
                          userid=FLAGS.rabbit_userid,
                          password=FLAGS.rabbit_password,
                          virtual_host=FLAGS.rabbit_virtual_host)

@@ -41,6 +41,7 @@ import json
from nova import exception
from nova import flags
from nova import log as logging
from nova.scheduler import zone_aware_scheduler
from nova import utils
from nova.scheduler import zone_aware_scheduler

@@ -226,7 +227,7 @@ class JsonFilter(HostFilter):
        required_disk = instance_type['local_gb']
        query = ['and',
                 ['>=', '$compute.host_memory_free', required_ram],
                 ['>=', '$compute.disk_available', required_disk]
                 ['>=', '$compute.disk_available', required_disk],
                ]
        return (self._full_name(), json.dumps(query))
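For the 'tiny' instance type used in the tests below (memory_mb=50, local_gb=500), instance_type_to_filter would therefore produce something like this; illustrative, not captured output:

```python
# Illustrative result: (filter name, JSON-encoded query)
('nova.scheduler.host_filter.JsonFilter',
 '["and", [">=", "$compute.host_memory_free", 50], '
 '[">=", "$compute.disk_available", 500]]')
```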


nova/scheduler/least_cost.py (156, new file)
@@ -0,0 +1,156 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Least Cost Scheduler is a mechanism for choosing which host machines to
provision a set of resources to. The input of the least-cost-scheduler is a
set of objective-functions, called the 'cost-functions', a weight for each
cost-function, and a list of candidate hosts (gathered via FilterHosts).

The cost-function and weights are tabulated, and the host with the least cost
is then selected for provisioning.
"""

import collections

from nova import exception
from nova import flags
from nova import log as logging
from nova.scheduler import zone_aware_scheduler
from nova import utils

LOG = logging.getLogger('nova.scheduler.least_cost')

FLAGS = flags.FLAGS
flags.DEFINE_list('least_cost_scheduler_cost_functions',
                  ['nova.scheduler.least_cost.noop_cost_fn'],
                  'Which cost functions the LeastCostScheduler should use.')


# TODO(sirp): Once we have enough of these rules, we can break them out into a
# cost_functions.py file (perhaps in a least_cost_scheduler directory)
flags.DEFINE_integer('noop_cost_fn_weight', 1,
                     'How much weight to give the noop cost function')


def noop_cost_fn(host):
    """Return a pre-weight cost of 1 for each host"""
    return 1


flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
                     'How much weight to give the fill-first cost function')


def fill_first_cost_fn(host):
    """Prefer hosts that have less ram available, filter_hosts will exclude
    hosts that don't have enough ram"""
    hostname, caps = host
    free_mem = caps['compute']['host_memory_free']
    return free_mem


class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
    def get_cost_fns(self):
        """Returns a list of tuples containing weights and cost functions to
        use for weighing hosts
        """
        cost_fns = []
        for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:

            try:
                # NOTE(sirp): import_class is somewhat misnamed since it can
                # import any callable from a module
                cost_fn = utils.import_class(cost_fn_str)
            except exception.ClassNotFound:
                raise exception.SchedulerCostFunctionNotFound(
                    cost_fn_str=cost_fn_str)

            try:
                flag_name = "%s_weight" % cost_fn.__name__
                weight = getattr(FLAGS, flag_name)
            except AttributeError:
                raise exception.SchedulerWeightFlagNotFound(
                    flag_name=flag_name)

            cost_fns.append((weight, cost_fn))

        return cost_fns

    def weigh_hosts(self, num, request_spec, hosts):
        """Returns a list of dictionaries of form:
           [ {weight: weight, hostname: hostname} ]"""

        # FIXME(sirp): weigh_hosts should handle more than just instances
        hostnames = [hostname for hostname, caps in hosts]

        cost_fns = self.get_cost_fns()
        costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)

        weighted = []
        weight_log = []
        for cost, hostname in zip(costs, hostnames):
            weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
            weight_dict = dict(weight=cost, hostname=hostname)
            weighted.append(weight_dict)

        LOG.debug(_("Weighted Costs => %s") % weight_log)
        return weighted


def normalize_list(L):
    """Normalize an array of numbers such that each element satisfies:
       0 <= e <= 1"""
    if not L:
        return L
    max_ = max(L)
    if max_ > 0:
        return [(float(e) / max_) for e in L]
    return L
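For example (values invented):

```python
# normalize_list divides by the largest element:
normalize_list([100, 400, 200])   # -> [0.25, 1.0, 0.5]
normalize_list([])                # -> []
```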


def weighted_sum(domain, weighted_fns, normalize=True):
    """Use the weighted-sum method to compute a score for an array of objects.
    Normalize the results of the objective-functions so that the weights are
    meaningful regardless of objective-function's range.

    domain - input to be scored
    weighted_fns - list of weights and functions like:
        [(weight, objective-functions)]

    Returns an unsorted list of scores. To pair with hosts do: zip(scores, hosts)
    """
    # Table of form:
    #   { domain1: [score1, score2, ..., scoreM]
    #     ...
    #     domainN: [score1, score2, ..., scoreM] }
    score_table = collections.defaultdict(list)
    for weight, fn in weighted_fns:
        scores = [fn(elem) for elem in domain]

        if normalize:
            norm_scores = normalize_list(scores)
        else:
            norm_scores = scores

        for idx, score in enumerate(norm_scores):
            weighted_score = score * weight
            score_table[idx].append(weighted_score)

    # Sum rows in table to compute score for each element in domain
    domain_scores = []
    for idx in sorted(score_table):
        elem_score = sum(score_table[idx])
        elem = domain[idx]
        domain_scores.append(elem_score)

    return domain_scores
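A worked example, mirroring the numbers in test_basic_costing further down (free RAM weighted 1, I/O weighted 2); the host data here is invented for illustration:

```python
# Illustrative: per-function scores are normalized, weighted, then summed.
hosts = ['h1', 'h2', 'h3']
ram = {'h1': 512, 'h2': 256, 'h3': 512}    # normalized: 1.0, 0.5, 1.0
io = {'h1': 100, 'h2': 400, 'h3': 100}     # normalized: 0.25, 1.0, 0.25
fns = [(1, lambda h: ram[h]), (2, lambda h: io[h])]
print weighted_sum(hosts, fns)             # -> [1.5, 2.5, 1.5]
```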
@@ -21,10 +21,9 @@
Simple Scheduler
"""

import datetime

from nova import db
from nova import flags
from nova import utils
from nova.scheduler import driver
from nova.scheduler import chance

@@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler):

        # TODO(vish): this probably belongs in the manager, if we
        #             can generalize this somehow
        now = datetime.datetime.utcnow()
        now = utils.utcnow()
        db.instance_update(context, instance_id, {'host': host,
                                                  'scheduled_at': now})
        return host
@@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler):
        if self.service_is_up(service):
            # NOTE(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            now = utils.utcnow()
            db.instance_update(context,
                               instance_id,
                               {'host': service['host'],
@@ -90,7 +89,7 @@ class SimpleScheduler(chance.ChanceScheduler):

        # TODO(vish): this probably belongs in the manager, if we
        #             can generalize this somehow
        now = datetime.datetime.utcnow()
        now = utils.utcnow()
        db.volume_update(context, volume_id, {'host': host,
                                              'scheduled_at': now})
        return host
@@ -103,7 +102,7 @@ class SimpleScheduler(chance.ChanceScheduler):
        if self.service_is_up(service):
            # NOTE(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            now = utils.utcnow()
            db.volume_update(context,
                             volume_id,
                             {'host': service['host'],

@@ -39,7 +39,7 @@ class ZoneAwareScheduler(driver.Scheduler):
        return api.call_zone_method(context, method, specs=specs)

    def schedule_run_instance(self, context, instance_id, request_spec,
            *args, **kwargs):
                              *args, **kwargs):
        """This method is called from nova.compute.api to provision
        an instance. However we need to look at the parameters being
        passed in to see if this is a request to:
@@ -116,6 +116,9 @@ class ZoneAwareScheduler(driver.Scheduler):
        # Filter local hosts based on requirements ...
        host_list = self.filter_hosts(num_instances, request_spec)

        # TODO(sirp): weigh_hosts should also be a function of 'topic' or
        # resources, so that we can apply different objective functions to it

        # then weigh the selected hosts.
        # weighted = [{weight=weight, name=hostname}, ...]
        weighted = self.weigh_hosts(num_instances, request_spec, host_list)
@@ -139,12 +142,16 @@ class ZoneAwareScheduler(driver.Scheduler):

    def filter_hosts(self, num, request_spec):
        """Derived classes must override this method and return
        a list of hosts in [(hostname, capability_dict)] format.
           a list of hosts in [(hostname, capability_dict)] format.
        """
        raise NotImplemented()
        # NOTE(sirp): The default logic is the equivalent to AllHostsFilter
        service_states = self.zone_manager.service_states
        return [(host, services)
                for host, services in service_states.iteritems()]

    def weigh_hosts(self, num, request_spec, hosts):
        """Derived classes must override this method and return
        a lists of hosts in [{weight, hostname}] format.
        """Derived classes may override this to provide more sophisticated
        scheduling objectives
        """
        raise NotImplemented()
        # NOTE(sirp): The default logic is the same as the NoopCostFunction
        return [dict(weight=1, hostname=host) for host, caps in hosts]

@@ -17,16 +17,17 @@
ZoneManager oversees all communications with child Zones.
"""

import datetime
import novaclient
import thread
import traceback

from datetime import datetime
from eventlet import greenpool

from nova import db
from nova import flags
from nova import log as logging
from nova import utils

FLAGS = flags.FLAGS
flags.DEFINE_integer('zone_db_check_interval', 60,
@@ -42,7 +43,7 @@ class ZoneState(object):
        self.name = None
        self.capabilities = None
        self.attempt = 0
        self.last_seen = datetime.min
        self.last_seen = datetime.datetime.min
        self.last_exception = None
        self.last_exception_time = None

@@ -56,7 +57,7 @@ class ZoneState(object):
    def update_metadata(self, zone_metadata):
        """Update zone metadata after successful communications with
           child zone."""
        self.last_seen = datetime.now()
        self.last_seen = utils.utcnow()
        self.attempt = 0
        self.name = zone_metadata.get("name", "n/a")
        self.capabilities = ", ".join(["%s=%s" % (k, v)
@@ -72,7 +73,7 @@ class ZoneState(object):
        """Something went wrong. Check to see if zone should be
           marked as offline."""
        self.last_exception = exception
        self.last_exception_time = datetime.now()
        self.last_exception_time = utils.utcnow()
        api_url = self.api_url
        logging.warning(_("'%(exception)s' error talking to "
                          "zone %(api_url)s") % locals())
@@ -104,7 +105,7 @@ def _poll_zone(zone):
class ZoneManager(object):
    """Keeps the zone states updated."""
    def __init__(self):
        self.last_zone_db_check = datetime.min
        self.last_zone_db_check = datetime.datetime.min
        self.zone_states = {}  # { <zone_id> : ZoneState }
        self.service_states = {}  # { <host> : { <service> : { cap k : v }}}
        self.green_pool = greenpool.GreenPool()
@@ -158,10 +159,10 @@ class ZoneManager(object):

    def ping(self, context=None):
        """Ping should be called periodically to update zone status."""
        diff = datetime.now() - self.last_zone_db_check
        diff = utils.utcnow() - self.last_zone_db_check
        if diff.seconds >= FLAGS.zone_db_check_interval:
            logging.debug(_("Updating zone cache from db."))
            self.last_zone_db_check = datetime.now()
            self.last_zone_db_check = utils.utcnow()
            self._refresh_from_db(context)
            self._poll_zones(context)

nova/tests/scheduler/__init__.py (0, new file)
nova/tests/scheduler/test_host_filter.py (206, new file)
@@ -0,0 +1,206 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""

import json

from nova import exception
from nova import flags
from nova import test
from nova.scheduler import host_filter

FLAGS = flags.FLAGS


class FakeZoneManager:
    pass


class HostFilterTestCase(test.TestCase):
    """Test case for host filters."""

    def _host_caps(self, multiplier):
        # Returns host capabilities in the following way:
        #   host1 = memory:free 10 (100max)
        #           disk:available 100 (1000max)
        #   hostN = memory:free 10 + 10N
        #           disk:available 100 + 100N
        # in other words: hostN has more resources than host0
        # which means ... don't go above 10 hosts.
        return {'host_name-description': 'XenServer %s' % multiplier,
                'host_hostname': 'xs-%s' % multiplier,
                'host_memory_total': 100,
                'host_memory_overhead': 10,
                'host_memory_free': 10 + multiplier * 10,
                'host_memory_free-computed': 10 + multiplier * 10,
                'host_other-config': {},
                'host_ip_address': '192.168.1.%d' % (100 + multiplier),
                'host_cpu_info': {},
                'disk_available': 100 + multiplier * 100,
                'disk_total': 1000,
                'disk_used': 0,
                'host_uuid': 'xxx-%d' % multiplier,
                'host_name-label': 'xs-%s' % multiplier}

    def setUp(self):
        self.old_flag = FLAGS.default_host_filter
        FLAGS.default_host_filter = \
            'nova.scheduler.host_filter.AllHostsFilter'
        self.instance_type = dict(name='tiny',
                                  memory_mb=50,
                                  vcpus=10,
                                  local_gb=500,
                                  flavorid=1,
                                  swap=500,
                                  rxtx_quota=30000,
                                  rxtx_cap=200)

        self.zone_manager = FakeZoneManager()
        states = {}
        for x in xrange(10):
            states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
        self.zone_manager.service_states = states

    def tearDown(self):
        FLAGS.default_host_filter = self.old_flag

    def test_choose_filter(self):
        # Test default filter ...
        hf = host_filter.choose_host_filter()
        self.assertEquals(hf._full_name(),
                          'nova.scheduler.host_filter.AllHostsFilter')
        # Test valid filter ...
        hf = host_filter.choose_host_filter(
            'nova.scheduler.host_filter.InstanceTypeFilter')
        self.assertEquals(hf._full_name(),
                          'nova.scheduler.host_filter.InstanceTypeFilter')
        # Test invalid filter ...
        try:
            host_filter.choose_host_filter('does not exist')
            self.fail("Should not find host filter.")
        except exception.SchedulerHostFilterNotFound:
            pass

    def test_all_host_filter(self):
        hf = host_filter.AllHostsFilter()
        cooked = hf.instance_type_to_filter(self.instance_type)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(10, len(hosts))
        for host, capabilities in hosts:
            self.assertTrue(host.startswith('host'))

    def test_instance_type_filter(self):
        hf = host_filter.InstanceTypeFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
                          name)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        self.assertEquals('host05', just_hosts[0])
        self.assertEquals('host10', just_hosts[5])

    def test_json_filter(self):
        hf = host_filter.JsonFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        self.assertEquals('host05', just_hosts[0])
        self.assertEquals('host10', just_hosts[5])

        # Try some custom queries

        raw = ['or',
               ['and',
                ['<', '$compute.host_memory_free', 30],
                ['<', '$compute.disk_available', 300]
               ],
               ['and',
                ['>', '$compute.host_memory_free', 70],
                ['>', '$compute.disk_available', 700]
               ]
              ]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([1, 2, 8, 9, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        raw = ['not',
               ['=', '$compute.host_memory_free', 30],
              ]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(9, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([2, 4, 6, 8, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        # Try some bogus input ...
        raw = ['unknown command', ]
        cooked = json.dumps(raw)
        try:
            hf.filter_hosts(self.zone_manager, cooked)
            self.fail("Should give KeyError")
        except KeyError, e:
            pass

        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
            ['not', True, False, True, False]
        )))

        try:
            hf.filter_hosts(self.zone_manager, json.dumps(
                'not', True, False, True, False
            ))
            self.fail("Should give KeyError")
        except KeyError, e:
            pass

        self.assertFalse(hf.filter_hosts(self.zone_manager,
                                         json.dumps(['=', '$foo', 100])))
        self.assertFalse(hf.filter_hosts(self.zone_manager,
                                         json.dumps(['=', '$.....', 100])))
        self.assertFalse(hf.filter_hosts(self.zone_manager,
                                         json.dumps(
            ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))

        self.assertFalse(hf.filter_hosts(self.zone_manager,
                                         json.dumps(['=', {}, ['>', '$missing....foo']])))
nova/tests/scheduler/test_least_cost_scheduler.py (144, new file)
@@ -0,0 +1,144 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Least Cost Scheduler
"""

from nova import flags
from nova import test
from nova.scheduler import least_cost
from nova.tests.scheduler import test_zone_aware_scheduler

MB = 1024 * 1024
FLAGS = flags.FLAGS


class FakeHost(object):
    def __init__(self, host_id, free_ram, io):
        self.id = host_id
        self.free_ram = free_ram
        self.io = io


class WeightedSumTestCase(test.TestCase):
    def test_empty_domain(self):
        domain = []
        weighted_fns = []
        result = least_cost.weighted_sum(domain, weighted_fns)
        expected = []
        self.assertEqual(expected, result)

    def test_basic_costing(self):
        hosts = [
            FakeHost(1, 512 * MB, 100),
            FakeHost(2, 256 * MB, 400),
            FakeHost(3, 512 * MB, 100)
        ]

        weighted_fns = [
            (1, lambda h: h.free_ram),  # Fill-first, free_ram is a *cost*
            (2, lambda h: h.io),        # Avoid high I/O
        ]

        costs = least_cost.weighted_sum(
            domain=hosts, weighted_fns=weighted_fns)

        # Each 256 MB unit of free-ram contributes 0.5 points by way of:
        #   cost = weight * (score/max_score) = 1 * (256/512) = 0.5
        # Each 100 iops of IO adds 0.5 points by way of:
        #   cost = 2 * (100/400) = 2 * 0.25 = 0.5
        expected = [1.5, 2.5, 1.5]
        self.assertEqual(expected, costs)


class LeastCostSchedulerTestCase(test.TestCase):
    def setUp(self):
        super(LeastCostSchedulerTestCase, self).setUp()

        class FakeZoneManager:
            pass

        zone_manager = FakeZoneManager()

        states = test_zone_aware_scheduler.fake_zone_manager_service_states(
            num_hosts=10)
        zone_manager.service_states = states

        self.sched = least_cost.LeastCostScheduler()
        self.sched.zone_manager = zone_manager

    def tearDown(self):
        super(LeastCostSchedulerTestCase, self).tearDown()

    def assertWeights(self, expected, num, request_spec, hosts):
        weighted = self.sched.weigh_hosts(num, request_spec, hosts)
        self.assertDictListMatch(weighted, expected, approx_equal=True)

    def test_no_hosts(self):
        num = 1
        request_spec = {}
        hosts = []

        expected = []
        self.assertWeights(expected, num, request_spec, hosts)

    def test_noop_cost_fn(self):
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.noop_cost_fn'
        ]
        FLAGS.noop_cost_fn_weight = 1

        num = 1
        request_spec = {}
        hosts = self.sched.filter_hosts(num, request_spec)

        expected = [dict(weight=1, hostname=hostname)
                    for hostname, caps in hosts]
        self.assertWeights(expected, num, request_spec, hosts)

    def test_cost_fn_weights(self):
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.noop_cost_fn'
        ]
        FLAGS.noop_cost_fn_weight = 2

        num = 1
        request_spec = {}
        hosts = self.sched.filter_hosts(num, request_spec)

        expected = [dict(weight=2, hostname=hostname)
                    for hostname, caps in hosts]
        self.assertWeights(expected, num, request_spec, hosts)

    def test_fill_first_cost_fn(self):
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.fill_first_cost_fn'
        ]
        FLAGS.fill_first_cost_fn_weight = 1

        num = 1
        request_spec = {}
        hosts = self.sched.filter_hosts(num, request_spec)

        expected = []
        for idx, (hostname, caps) in enumerate(hosts):
            # Costs are normalized so over 10 hosts, each host with increasing
            # free ram will cost 1/N more. Since the lowest cost host has some
            # free ram, we add in the 1/N for the base_cost
            weight = 0.1 + (0.1 * idx)
            weight_dict = dict(weight=weight, hostname=hostname)
            expected.append(weight_dict)

        self.assertWeights(expected, num, request_spec, hosts)
@@ -22,6 +22,37 @@ from nova.scheduler import zone_aware_scheduler
from nova.scheduler import zone_manager


def _host_caps(multiplier):
    # Returns host capabilities in the following way:
    #   host1 = memory:free 10 (100max)
    #           disk:available 100 (1000max)
    #   hostN = memory:free 10 + 10N
    #           disk:available 100 + 100N
    # in other words: hostN has more resources than host0
    # which means ... don't go above 10 hosts.
    return {'host_name-description': 'XenServer %s' % multiplier,
            'host_hostname': 'xs-%s' % multiplier,
            'host_memory_total': 100,
            'host_memory_overhead': 10,
            'host_memory_free': 10 + multiplier * 10,
            'host_memory_free-computed': 10 + multiplier * 10,
            'host_other-config': {},
            'host_ip_address': '192.168.1.%d' % (100 + multiplier),
            'host_cpu_info': {},
            'disk_available': 100 + multiplier * 100,
            'disk_total': 1000,
            'disk_used': 0,
            'host_uuid': 'xxx-%d' % multiplier,
            'host_name-label': 'xs-%s' % multiplier}


def fake_zone_manager_service_states(num_hosts):
    states = {}
    for x in xrange(num_hosts):
        states['host%02d' % (x + 1)] = {'compute': _host_caps(x)}
    return states


class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
    def filter_hosts(self, num, specs):
        # NOTE(sirp): this is returning [(hostname, services)]
@@ -38,16 +69,16 @@ class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
class FakeZoneManager(zone_manager.ZoneManager):
    def __init__(self):
        self.service_states = {
            'host1': {
                'compute': {'ram': 1000}
            },
            'host2': {
                'compute': {'ram': 2000}
            },
            'host3': {
                'compute': {'ram': 3000}
            }
        }
            'host1': {
                'compute': {'ram': 1000},
            },
            'host2': {
                'compute': {'ram': 2000},
            },
            'host3': {
                'compute': {'ram': 3000},
            },
        }


class FakeEmptyZoneManager(zone_manager.ZoneManager):
@@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
        super(_AuthManagerBaseTestCase, self).setUp()
        self.flags(connection_type='fake')
        self.manager = manager.AuthManager(new=True)
        self.manager.mc.cache = {}

    def test_create_and_find_user(self):
        with user_generator(self.manager):

@@ -254,10 +254,10 @@ class CloudTestCase(test.TestCase):
    def test_describe_instances(self):
        """Makes sure describe_instances works and filters results."""
        inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                                  'image_id': 1,
                                                  'image_ref': 1,
                                                  'host': 'host1'})
        inst2 = db.instance_create(self.context, {'reservation_id': 'a',
                                                  'image_id': 1,
                                                  'image_ref': 1,
                                                  'host': 'host2'})
        comp1 = db.service_create(self.context, {'host': 'host1',
                                                 'availability_zone': 'zone1',
@@ -447,7 +447,7 @@ class CloudTestCase(test.TestCase):

    def test_terminate_instances(self):
        inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                                  'image_id': 1,
                                                  'image_ref': 1,
                                                  'host': 'host1'})
        terminate_instances = self.cloud.terminate_instances
        # valid instance_id

@@ -19,7 +19,6 @@
Tests For Compute
"""

import datetime
import mox
import stubout

@@ -84,7 +83,7 @@ class ComputeTestCase(test.TestCase):
    def _create_instance(self, params={}):
        """Create a test instance"""
        inst = {}
        inst['image_id'] = 1
        inst['image_ref'] = 1
        inst['reservation_id'] = 'r-fakeres'
        inst['launch_time'] = '10'
        inst['user_id'] = self.user.id
@@ -150,7 +149,7 @@ class ComputeTestCase(test.TestCase):
        ref = self.compute_api.create(
            self.context,
            instance_type=instance_types.get_default_instance_type(),
            image_id=None,
            image_href=None,
            security_group=['testgroup'])
        try:
            self.assertEqual(len(db.security_group_get_by_instance(
@@ -168,7 +167,7 @@ class ComputeTestCase(test.TestCase):
        ref = self.compute_api.create(
            self.context,
            instance_type=instance_types.get_default_instance_type(),
            image_id=None,
            image_href=None,
            security_group=['testgroup'])
        try:
            db.instance_destroy(self.context, ref[0]['id'])
@@ -184,7 +183,7 @@ class ComputeTestCase(test.TestCase):
        ref = self.compute_api.create(
            self.context,
            instance_type=instance_types.get_default_instance_type(),
            image_id=None,
            image_href=None,
            security_group=['testgroup'])

        try:
@@ -217,12 +216,12 @@ class ComputeTestCase(test.TestCase):
        instance_ref = db.instance_get(self.context, instance_id)
        self.assertEqual(instance_ref['launched_at'], None)
        self.assertEqual(instance_ref['deleted_at'], None)
        launch = datetime.datetime.utcnow()
        launch = utils.utcnow()
        self.compute.run_instance(self.context, instance_id)
        instance_ref = db.instance_get(self.context, instance_id)
        self.assert_(instance_ref['launched_at'] > launch)
        self.assertEqual(instance_ref['deleted_at'], None)
        terminate = datetime.datetime.utcnow()
        terminate = utils.utcnow()
        self.compute.terminate_instance(self.context, instance_id)
        self.context = self.context.elevated(True)
        instance_ref = db.instance_get(self.context, instance_id)

@@ -20,8 +20,6 @@
Tests For Console proxy.
"""

import datetime

from nova import context
from nova import db
from nova import exception

@@ -133,13 +133,14 @@ class HostFilterTestCase(test.TestCase):
        raw = ['or',
               ['and',
                ['<', '$compute.host_memory_free', 30],
                ['<', '$compute.disk_available', 300]
                ['<', '$compute.disk_available', 300],
               ],
               ['and',
                ['>', '$compute.host_memory_free', 70],
                ['>', '$compute.disk_available', 700]
               ]
                ['>', '$compute.disk_available', 700],
               ],
              ]

        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

@@ -183,13 +184,11 @@ class HostFilterTestCase(test.TestCase):
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
            ['not', True, False, True, False]
        )))
            ['not', True, False, True, False])))

        try:
            hf.filter_hosts(self.zone_manager, json.dumps(
                'not', True, False, True, False
            ))
                'not', True, False, True, False))
            self.fail("Should give KeyError")
        except KeyError, e:
            pass

@@ -18,6 +18,7 @@ import eventlet
import mox
import os
import re
import shutil
import sys

from xml.etree.ElementTree import fromstring as xml_to_tree
@@ -160,6 +161,7 @@ class LibvirtConnTestCase(test.TestCase):
                     'vcpus': 2,
                     'project_id': 'fake',
                     'bridge': 'br101',
                     'image_ref': '123456',
                     'instance_type_id': '5'}  # m1.small

    def lazy_load_library_exists(self):
@@ -280,6 +282,68 @@ class LibvirtConnTestCase(test.TestCase):
        instance_data = dict(self.test_instance)
        self._check_xml_and_container(instance_data)

    def test_snapshot(self):
        FLAGS.image_service = 'nova.image.fake.FakeImageService'

        # Only file-based instance storages are supported at the moment
        test_xml = """
        <domain type='kvm'>
            <devices>
                <disk type='file'>
                    <source file='filename'/>
                </disk>
            </devices>
        </domain>
        """

        class FakeVirtDomain(object):

            def __init__(self):
                pass

            def snapshotCreateXML(self, *args):
                return None

            def XMLDesc(self, *args):
                return test_xml

        def fake_lookup(instance_name):
            if instance_name == instance_ref.name:
                return FakeVirtDomain()

        def fake_execute(*args):
            # Touch filename to pass 'with open(out_path)'
            open(args[-1], "a").close()

        # Start test
        image_service = utils.import_object(FLAGS.image_service)

        # Assuming that base image already exists in image_service
        instance_ref = db.instance_create(self.context, self.test_instance)
        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
        connection.LibvirtConnection._conn.lookupByName = fake_lookup
        self.mox.StubOutWithMock(connection.utils, 'execute')
        connection.utils.execute = fake_execute

        self.mox.ReplayAll()

        conn = connection.LibvirtConnection(False)
        conn.snapshot(instance_ref, recv_meta['id'])

        snapshot = image_service.show(context, recv_meta['id'])
        self.assertEquals(snapshot['properties']['image_state'], 'available')
        self.assertEquals(snapshot['status'], 'active')
        self.assertEquals(snapshot['name'], snapshot_name)

    def test_multi_nic(self):
        instance_data = dict(self.test_instance)
        network_info = _create_network_info(2)
@@ -645,6 +709,8 @@ class LibvirtConnTestCase(test.TestCase):
        except Exception, e:
            count = (0 <= str(e.message).find('Unexpected method call'))

        shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name))

        self.assertTrue(count)

    def test_get_host_ip_addr(self):

@@ -16,7 +16,6 @@
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import webob
import webob.dec
import webob.exc

@@ -21,11 +21,24 @@ import select
from eventlet import greenpool
from eventlet import greenthread

from nova import exception
from nova import test
from nova import utils
from nova.utils import parse_mailmap, str_dict_replace


class ExceptionTestCase(test.TestCase):
    @staticmethod
    def _raise_exc(exc):
        raise exc()

    def test_exceptions_raise(self):
        for name in dir(exception):
            exc = getattr(exception, name)
            if isinstance(exc, type):
                self.assertRaises(exc, self._raise_exc, exc)


class ProjectTestCase(test.TestCase):
    def test_authors_up_to_date(self):
        topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')

@@ -13,10 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.

import nova
import stubout

import nova
from nova import context
from nova import flags
from nova import log
from nova import rpc
import nova.notifier.api
from nova.notifier.api import notify
@@ -24,8 +26,6 @@ from nova.notifier import no_op_notifier
from nova.notifier import rabbit_notifier
from nova import test

import stubout


class NotifierTestCase(test.TestCase):
    """Test case for notifications"""
@@ -115,3 +115,22 @@ class NotifierTestCase(test.TestCase):
        notify('publisher_id',
               'event_type', 'DEBUG', dict(a=3))
        self.assertEqual(self.test_topic, 'testnotify.debug')

    def test_error_notification(self):
        self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
                       'nova.notifier.rabbit_notifier')
        self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True)
        LOG = log.getLogger('nova')
        LOG.setup_from_flags()
        msgs = []

        def mock_cast(context, topic, data):
            msgs.append(data)

        self.stubs.Set(nova.rpc, 'cast', mock_cast)
        LOG.error('foo')
        self.assertEqual(1, len(msgs))
        msg = msgs[0]
        self.assertEqual(msg['event_type'], 'error_notification')
        self.assertEqual(msg['priority'], 'ERROR')
        self.assertEqual(msg['payload']['error'], 'foo')
File diff suppressed because it is too large.
@@ -55,8 +55,7 @@ class VMWareAPIVMTestCase(test.TestCase):
        vmwareapi_fake.reset()
        db_fakes.stub_out_db_instance_api(self.stubs)
        stubs.set_stubs(self.stubs)
        glance_stubs.stubout_glance_client(self.stubs,
                                           glance_stubs.FakeGlance)
        glance_stubs.stubout_glance_client(self.stubs)
        self.conn = vmwareapi_conn.get_connection(False)

    def _create_instance_in_the_db(self):
@@ -64,7 +63,7 @@ class VMWareAPIVMTestCase(test.TestCase):
                  'id': 1,
                  'project_id': self.project.id,
                  'user_id': self.user.id,
                  'image_id': "1",
                  'image_ref': "1",
                  'kernel_id': "1",
                  'ramdisk_id': "1",
                  'instance_type': 'm1.large',

@@ -79,7 +79,7 @@ class XenAPIVolumeTestCase(test.TestCase):
        self.values = {'id': 1,
                       'project_id': 'fake',
                       'user_id': 'fake',
                       'image_id': 1,
                       'image_ref': 1,
                       'kernel_id': 2,
                       'ramdisk_id': 3,
                       'instance_type_id': '3',  # m1.large
@@ -193,8 +193,7 @@ class XenAPIVMTestCase(test.TestCase):
        stubs.stubout_is_vdi_pv(self.stubs)
        self.stubs.Set(VMOps, 'reset_network', reset_network)
        stubs.stub_out_vm_methods(self.stubs)
        glance_stubs.stubout_glance_client(self.stubs,
                                           glance_stubs.FakeGlance)
        glance_stubs.stubout_glance_client(self.stubs)
        fake_utils.stub_out_utils_execute(self.stubs)
        self.context = context.RequestContext('fake', 'fake', False)
        self.conn = xenapi_conn.get_connection(False)
@@ -207,7 +206,7 @@ class XenAPIVMTestCase(test.TestCase):
            'id': id,
            'project_id': proj,
            'user_id': user,
            'image_id': 1,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'instance_type_id': '3',  # m1.large
@@ -351,14 +350,14 @@ class XenAPIVMTestCase(test.TestCase):
        self.assertEquals(self.vm['HVM_boot_params'], {})
        self.assertEquals(self.vm['HVM_boot_policy'], '')

    def _test_spawn(self, image_id, kernel_id, ramdisk_id,
    def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                    instance_type_id="3", os_type="linux",
                    instance_id=1, check_injection=False):
        stubs.stubout_loopingcall_start(self.stubs)
        values = {'id': instance_id,
                  'project_id': self.project.id,
                  'user_id': self.user.id,
                  'image_id': image_id,
                  'image_ref': image_ref,
                  'kernel_id': kernel_id,
                  'ramdisk_id': ramdisk_id,
                  'instance_type_id': instance_type_id,
@@ -567,7 +566,7 @@ class XenAPIVMTestCase(test.TestCase):
            'id': 1,
            'project_id': self.project.id,
            'user_id': self.user.id,
            'image_id': 1,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'instance_type_id': '3',  # m1.large
@@ -641,7 +640,7 @@ class XenAPIMigrateInstance(test.TestCase):
        self.values = {'id': 1,
                       'project_id': self.project.id,
                       'user_id': self.user.id,
                       'image_id': 1,
                       'image_ref': 1,
                       'kernel_id': None,
                       'ramdisk_id': None,
                       'local_gb': 5,
@@ -652,8 +651,7 @@ class XenAPIMigrateInstance(test.TestCase):
        fake_utils.stub_out_utils_execute(self.stubs)
        stubs.stub_out_migration_methods(self.stubs)
        stubs.stubout_get_this_vm_uuid(self.stubs)
        glance_stubs.stubout_glance_client(self.stubs,
                                           glance_stubs.FakeGlance)
        glance_stubs.stubout_glance_client(self.stubs)

    def tearDown(self):
        super(XenAPIMigrateInstance, self).tearDown()
@@ -679,8 +677,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
    """Unit tests for code that detects the ImageType."""
    def setUp(self):
        super(XenAPIDetermineDiskImageTestCase, self).setUp()
        glance_stubs.stubout_glance_client(self.stubs,
                                           glance_stubs.FakeGlance)
        glance_stubs.stubout_glance_client(self.stubs)

        class FakeInstance(object):
            pass
@@ -697,7 +694,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
    def test_instance_disk(self):
        """If a kernel is specified, the image type is DISK (aka machine)."""
        FLAGS.xenapi_image_service = 'objectstore'
        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE
        self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
        self.assert_disk_type(vm_utils.ImageType.DISK)

@@ -707,7 +704,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
        DISK_RAW is assumed.
        """
        FLAGS.xenapi_image_service = 'objectstore'
        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
        self.fake_instance.kernel_id = None
        self.assert_disk_type(vm_utils.ImageType.DISK_RAW)

@@ -717,7 +714,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
        this case will be 'raw'.
        """
        FLAGS.xenapi_image_service = 'glance'
        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
        self.fake_instance.kernel_id = None
        self.assert_disk_type(vm_utils.ImageType.DISK_RAW)

@@ -727,7 +724,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
        this case will be 'vhd'.
        """
        FLAGS.xenapi_image_service = 'glance'
        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD
        self.fake_instance.kernel_id = None
        self.assert_disk_type(vm_utils.ImageType.DISK_VHD)


@@ -61,7 +61,7 @@ def stub_out_db_instance_api(stubs):
        'name': values['name'],
        'id': values['id'],
        'reservation_id': utils.generate_uid('r'),
        'image_id': values['image_id'],
        'image_ref': values['image_ref'],
        'kernel_id': values['kernel_id'],
        'ramdisk_id': values['ramdisk_id'],
        'state_description': 'scheduling',