Merge w/ trunk.
@@ -30,6 +30,7 @@ Gabe Westmaas <gabe.westmaas@rackspace.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
Isaku Yamahata <yamahata@valinux.co.jp>
Jason Koelker <jason@koelker.net>
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
@@ -83,6 +84,7 @@ Trey Morris <trey.morris@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
Vivek Y S <vivek.ys@gmail.com>
William Wolf <throughnothing@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
@@ -1,4 +1,4 @@
-NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null)
+NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE}))
 export EC2_ACCESS_KEY="%(access)s:%(project)s"
 export EC2_SECRET_KEY="%(secret)s"
 export EC2_URL="%(ec2)s"
@@ -14,8 +14,8 @@
 # under the License.

 """
-Host Filter is a driver mechanism for requesting instance resources.
-Three drivers are included: AllHosts, Flavor & JSON. AllHosts just
+Host Filter is a mechanism for requesting instance resources.
+Three filters are included: AllHosts, Flavor & JSON. AllHosts just
 returns the full, unfiltered list of hosts. Flavor is a hard coded
 matching mechanism based on flavor criteria and JSON is an ad-hoc
 filter grammar.
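The JSON grammar mentioned in that docstring takes nested lists of operators plus '$'-prefixed capability lookups. A minimal sketch of such a query, with operator names taken from the JsonFilter code and values invented to mirror the test fixtures further down:

import json

# Hosts with >= 50 free RAM, or >= 30 free RAM and >= 300 free disk.
# (Values are illustrative, in the units the fake test data uses.)
raw = ['or',
       ['>=', '$compute.host_memory_free', 50],
       ['and',
        ['>=', '$compute.host_memory_free', 30],
        ['>=', '$compute.disk_available', 300]]]
cooked = json.dumps(raw)
# hosts = host_filter.JsonFilter().filter_hosts(zone_manager, cooked)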
@@ -42,17 +42,18 @@ from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import utils
 from nova.scheduler import zone_aware_scheduler

 LOG = logging.getLogger('nova.scheduler.host_filter')

 FLAGS = flags.FLAGS
-flags.DEFINE_string('default_host_filter_driver',
+flags.DEFINE_string('default_host_filter',
                     'nova.scheduler.host_filter.AllHostsFilter',
-                    'Which driver to use for filtering hosts.')
+                    'Which filter to use for filtering hosts.')


 class HostFilter(object):
-    """Base class for host filter drivers."""
+    """Base class for host filters."""

     def instance_type_to_filter(self, instance_type):
         """Convert instance_type into a filter for most common use-case."""
@@ -63,14 +64,15 @@ class HostFilter(object):
         raise NotImplementedError()

     def _full_name(self):
-        """module.classname of the filter driver"""
+        """module.classname of the filter."""
         return "%s.%s" % (self.__module__, self.__class__.__name__)


 class AllHostsFilter(HostFilter):
-    """NOP host filter driver. Returns all hosts in ZoneManager.
+    """ NOP host filter. Returns all hosts in ZoneManager.
     This essentially does what the old Scheduler+Chance used
-    to give us."""
+    to give us.
+    """

     def instance_type_to_filter(self, instance_type):
         """Return anything to prevent base-class from raising
@@ -83,8 +85,8 @@ class AllHostsFilter(HostFilter):
                 for host, services in zone_manager.service_states.iteritems()]


-class FlavorFilter(HostFilter):
-    """HostFilter driver hard-coded to work with flavors."""
+class InstanceTypeFilter(HostFilter):
+    """HostFilter hard-coded to work with InstanceType records."""

     def instance_type_to_filter(self, instance_type):
         """Use instance_type to filter hosts."""
@@ -98,8 +100,9 @@ class FlavorFilter(HostFilter):
             capabilities = services.get('compute', {})
             host_ram_mb = capabilities['host_memory_free']
             disk_bytes = capabilities['disk_available']
-            if host_ram_mb >= instance_type['memory_mb'] and \
-                    disk_bytes >= instance_type['local_gb']:
+            spec_ram = instance_type['memory_mb']
+            spec_disk = instance_type['local_gb']
+            if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
                 selected_hosts.append((host, capabilities))
         return selected_hosts
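A worked instance of that comparison, using the 'tiny' instance type from the test changes further down (memory_mb=50, local_gb=500); the host capability numbers are invented to mirror the test fixtures:

# Illustrative data: host04 falls below both thresholds, host05 meets them.
instance_type = {'memory_mb': 50, 'local_gb': 500}
hosts = [('host04', {'host_memory_free': 40, 'disk_available': 400}),
         ('host05', {'host_memory_free': 50, 'disk_available': 500})]

spec_ram = instance_type['memory_mb']
spec_disk = instance_type['local_gb']
selected = [(host, caps) for host, caps in hosts
            if caps['host_memory_free'] >= spec_ram
            and caps['disk_available'] >= spec_disk]
# selected == [('host05', {'host_memory_free': 50, 'disk_available': 500})]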
@@ -109,15 +112,15 @@ class FlavorFilter(HostFilter):
     # 'host_memory_total': 8244539392,
     # 'host_memory_overhead': 184225792,
     # 'host_memory_free': 3868327936,
-    # 'host_memory_free_computed': 3840843776},
-    # 'host_other-config': {},
+    # 'host_memory_free_computed': 3840843776,
+    # 'host_other_config': {},
     # 'host_ip_address': '192.168.1.109',
     # 'host_cpu_info': {},
     # 'disk_available': 32954957824,
     # 'disk_total': 50394562560,
-    # 'disk_used': 17439604736},
-    # 'host_name-label': 'xs-mini'}
+    # 'disk_used': 17439604736,
+    # 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
+    # 'host_name_label': 'xs-mini'}

     # instance_type table has:
     #name = Column(String(255), unique=True)
@@ -131,8 +134,9 @@ class FlavorFilter(HostFilter):


 class JsonFilter(HostFilter):
-    """Host Filter driver to allow simple JSON-based grammar for
-    selecting hosts."""
+    """Host Filter to allow simple JSON-based grammar for
+    selecting hosts.
+    """

     def _equals(self, args):
         """First term is == all the other terms."""
@@ -228,7 +232,8 @@ class JsonFilter(HostFilter):

     def _parse_string(self, string, host, services):
         """Strings prefixed with $ are capability lookups in the
-        form '$service.capability[.subcap*]'"""
+        form '$service.capability[.subcap*]'
+        """
         if not string:
             return None
         if string[0] != '$':
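Roughly, a '$'-prefixed string is split on '.' and walked through the host's services dictionary. A sketch of that resolution; the helper name and data here are illustrative, not part of this file:

def lookup(string, services):
    # '$compute.host_memory_free' -> services['compute']['host_memory_free']
    value = services
    for part in string[1:].split('.'):
        value = value[part]
    return value

services = {'compute': {'host_memory_free': 3868327936}}
assert lookup('$compute.host_memory_free', services) == 3868327936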
@@ -271,18 +276,48 @@ class JsonFilter(HostFilter):
         return hosts


-DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter]
+FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter]


-def choose_driver(driver_name=None):
-    """Since the caller may specify which driver to use we need
+def choose_host_filter(filter_name=None):
+    """Since the caller may specify which filter to use we need
     to have an authoritative list of what is permissible. This
-    function checks the driver name against a predefined set
-    of acceptable drivers."""
+    function checks the filter name against a predefined set
+    of acceptable filters.
+    """

-    if not driver_name:
-        driver_name = FLAGS.default_host_filter_driver
-    for driver in DRIVERS:
-        if "%s.%s" % (driver.__module__, driver.__name__) == driver_name:
-            return driver()
-    raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name)
+    if not filter_name:
+        filter_name = FLAGS.default_host_filter
+    for filter_class in FILTERS:
+        host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__)
+        if host_match == filter_name:
+            return filter_class()
+    raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)


+class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+    """The HostFilterScheduler uses the HostFilter to filter
+    hosts for weighing. The particular filter used may be passed in
+    as an argument or the default will be used.
+
+    request_spec = {'filter': <Filter name>,
+                    'instance_type': <InstanceType dict>}
+    """
+
+    def filter_hosts(self, num, request_spec):
+        """Filter the full host list (from the ZoneManager)"""
+        filter_name = request_spec.get('filter', None)
+        host_filter = choose_host_filter(filter_name)
+
+        # TODO(sandy): We're only using InstanceType-based specs
+        # currently. Later we'll need to snoop for more detailed
+        # host filter requests.
+        instance_type = request_spec['instance_type']
+        name, query = host_filter.instance_type_to_filter(instance_type)
+        return host_filter.filter_hosts(self.zone_manager, query)
+
+    def weigh_hosts(self, num, request_spec, hosts):
+        """Derived classes must override this method and return
+        a lists of hosts in [{weight, hostname}] format.
+        """
+        return [dict(weight=1, hostname=host) for host, caps in hosts]
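Putting the pieces together, a sketch of how a caller exercises choose_host_filter() with the request_spec shape documented above; the zone_manager value is assumed to come from the scheduler:

request_spec = {'filter': 'nova.scheduler.host_filter.InstanceTypeFilter',
                'instance_type': dict(memory_mb=50, local_gb=500)}

hf = choose_host_filter(request_spec['filter'])
name, query = hf.instance_type_to_filter(request_spec['instance_type'])
# hosts = hf.filter_hosts(zone_manager, query)   # [(hostname, caps), ...]
# An unrecognized filter name raises exception.SchedulerHostFilterNotFound.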
@@ -22,7 +22,9 @@ across zones. There are two expansion points to this class for:

 import operator

+from nova import db
 from nova import log as logging
+from nova import rpc
 from nova.scheduler import api
 from nova.scheduler import driver
@@ -36,7 +38,7 @@ class ZoneAwareScheduler(driver.Scheduler):
         """Call novaclient zone method. Broken out for testing."""
         return api.call_zone_method(context, method, specs=specs)

-    def schedule_run_instance(self, context, topic='compute', specs={},
+    def schedule_run_instance(self, context, instance_id, request_spec,
                               *args, **kwargs):
         """This method is called from nova.compute.api to provision
         an instance. However we need to look at the parameters being
@@ -44,56 +46,83 @@ class ZoneAwareScheduler(driver.Scheduler):
         1. Create a Build Plan and then provision, or
         2. Use the Build Plan information in the request parameters
            to simply create the instance (either in this zone or
-           a child zone)."""
+           a child zone).
+        """

-        if 'blob' in specs:
-            return self.provision_instance(context, topic, specs)
+        # TODO(sandy): We'll have to look for richer specs at some point.
+
+        if 'blob' in request_spec:
+            self.provision_resource(context, request_spec, instance_id, kwargs)
+            return None

         # Create build plan and provision ...
-        build_plan = self.select(context, specs)
-        for item in build_plan:
-            self.provision_instance(context, topic, item)
+        build_plan = self.select(context, request_spec)
+        if not build_plan:
+            raise driver.NoValidHost(_('No hosts were available'))

-    def provision_instance(context, topic, item):
-        """Create the requested instance in this Zone or a child zone."""
+        for item in build_plan:
+            self.provision_resource(context, item, instance_id, kwargs)
+
+        # Returning None short-circuits the routing to Compute (since
+        # we've already done it here)
+        return None
+
+    def provision_resource(self, context, item, instance_id, kwargs):
+        """Create the requested resource in this Zone or a child zone."""
+        if "hostname" in item:
+            host = item['hostname']
+            kwargs['instance_id'] = instance_id
+            rpc.cast(context,
+                     db.queue_get_for(context, "compute", host),
+                     {"method": "run_instance",
+                      "args": kwargs})
+            LOG.debug(_("Casted to compute %(host)s for run_instance")
+                      % locals())
+        else:
+            # TODO(sandy) Provision in child zone ...
+            LOG.warning(_("Provision to Child Zone not supported (yet)"))
+            pass

-    def select(self, context, *args, **kwargs):
+    def select(self, context, request_spec, *args, **kwargs):
         """Select returns a list of weights and zone/host information
         corresponding to the best hosts to service the request. Any
         child zone information has been encrypted so as not to reveal
-        anything about the children."""
-        return self._schedule(context, "compute", *args, **kwargs)
+        anything about the children.
+        """
+        return self._schedule(context, "compute", request_spec,
+                              *args, **kwargs)

-    def schedule(self, context, topic, *args, **kwargs):
-        # TODO(sandy): We're only focused on compute instances right now,
-        # so we don't implement the default "schedule()" method required
-        # of Schedulers.
+    def schedule(self, context, topic, request_spec, *args, **kwargs):
+        """The schedule() contract requires we return the one
+        best-suited host for this request.
+        """
+        res = self._schedule(context, topic, *args, **kwargs)
+        # TODO(sirp): should this be a host object rather than a weight-dict?
+        if not res:
+            raise driver.NoValidHost(_('No hosts were available'))
+        return res[0]

-    def _schedule(self, context, topic, *args, **kwargs):
+    def _schedule(self, context, topic, request_spec, *args, **kwargs):
+        """Returns a list of hosts that meet the required specs,
+        ordered by their fitness.
+        """

         #TODO(sandy): extract these from args.
         if topic != "compute":
             raise NotImplemented(_("Zone Aware Scheduler only understands "
                                    "Compute nodes (for now)"))

         #TODO(sandy): how to infer this from OS API params?
         num_instances = 1
-        specs = {}

         # Filter local hosts based on requirements ...
-        host_list = self.filter_hosts(num_instances, specs)
+        host_list = self.filter_hosts(num_instances, request_spec)

         # then weigh the selected hosts.
         # weighted = [{weight=weight, name=hostname}, ...]
-        weighted = self.weigh_hosts(num_instances, specs, host_list)
+        weighted = self.weigh_hosts(num_instances, request_spec, host_list)

         # Next, tack on the best weights from the child zones ...
         child_results = self._call_zone_method(context, "select",
-                                               specs=specs)
+                                               specs=request_spec)
         for child_zone, result in child_results:
             for weighting in result:
                 # Remember the child_zone so we can get back to
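For a local-host build plan entry, the cast in provision_resource amounts to the following; the queue name format returned by db.queue_get_for is assumed here, and the values are illustrative:

item = {'weight': 1, 'hostname': 'host05'}   # one entry from select()
kwargs = {'instance_id': 42}                 # instance_id injected before the cast
message = {'method': 'run_instance', 'args': kwargs}
# rpc.cast(context, 'compute.host05', message)   # queue via db.queue_get_for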
@@ -108,12 +137,14 @@ class ZoneAwareScheduler(driver.Scheduler):
         weighted.sort(key=operator.itemgetter('weight'))
         return weighted

-    def filter_hosts(self, num, specs):
+    def filter_hosts(self, num, request_spec):
         """Derived classes must override this method and return
-        a list of hosts in [(hostname, capability_dict)] format."""
+        a list of hosts in [(hostname, capability_dict)] format.
+        """
         raise NotImplemented()

-    def weigh_hosts(self, num, specs, hosts):
+    def weigh_hosts(self, num, request_spec, hosts):
         """Derived classes must override this method and return
-        a lists of hosts in [{weight, hostname}] format."""
+        a lists of hosts in [{weight, hostname}] format.
+        """
         raise NotImplemented()
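Those two overrides form the subclass contract. A minimal sketch of a conforming subclass; the class name is invented, and the weigh_hosts body matches what HostFilterScheduler does above:

class TrivialScheduler(ZoneAwareScheduler):
    def filter_hosts(self, num, request_spec):
        # [(hostname, capability_dict)] format
        return [('host1', {'compute': {'host_memory_free': 1024}})]

    def weigh_hosts(self, num, request_spec, hosts):
        # [{weight, hostname}] format; lower weight sorts first
        return [dict(weight=1, hostname=host) for host, caps in hosts]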
@@ -169,6 +169,25 @@ class CloudTestCase(test.TestCase):
         db.volume_destroy(self.context, vol1['id'])
         db.volume_destroy(self.context, vol2['id'])

+    def test_create_volume_from_snapshot(self):
+        """Makes sure create_volume works when we specify a snapshot."""
+        vol = db.volume_create(self.context, {'size': 1})
+        snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
+                                                 'volume_size': vol['size'],
+                                                 'status': "available"})
+        snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
+
+        result = self.cloud.create_volume(self.context,
+                                          snapshot_id=snapshot_id)
+        volume_id = result['volumeId']
+        result = self.cloud.describe_volumes(self.context)
+        self.assertEqual(len(result['volumeSet']), 2)
+        self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id)
+
+        db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id))
+        db.snapshot_destroy(self.context, snap['id'])
+        db.volume_destroy(self.context, vol['id'])
+
     def test_describe_availability_zones(self):
         """Makes sure describe_availability_zones works and filters results."""
         service1 = db.service_create(self.context, {'host': 'host1_zones',
@@ -186,6 +205,52 @@ class CloudTestCase(test.TestCase):
         db.service_destroy(self.context, service1['id'])
         db.service_destroy(self.context, service2['id'])

+    def test_describe_snapshots(self):
+        """Makes sure describe_snapshots works and filters results."""
+        vol = db.volume_create(self.context, {})
+        snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']})
+        snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']})
+        result = self.cloud.describe_snapshots(self.context)
+        self.assertEqual(len(result['snapshotSet']), 2)
+        snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x')
+        result = self.cloud.describe_snapshots(self.context,
+                                               snapshot_id=[snapshot_id])
+        self.assertEqual(len(result['snapshotSet']), 1)
+        self.assertEqual(
+            ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']),
+            snap2['id'])
+        db.snapshot_destroy(self.context, snap1['id'])
+        db.snapshot_destroy(self.context, snap2['id'])
+        db.volume_destroy(self.context, vol['id'])
+
+    def test_create_snapshot(self):
+        """Makes sure create_snapshot works."""
+        vol = db.volume_create(self.context, {'status': "available"})
+        volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
+
+        result = self.cloud.create_snapshot(self.context,
+                                            volume_id=volume_id)
+        snapshot_id = result['snapshotId']
+        result = self.cloud.describe_snapshots(self.context)
+        self.assertEqual(len(result['snapshotSet']), 1)
+        self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
+
+        db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id))
+        db.volume_destroy(self.context, vol['id'])
+
+    def test_delete_snapshot(self):
+        """Makes sure delete_snapshot works."""
+        vol = db.volume_create(self.context, {'status': "available"})
+        snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
+                                                 'status': "available"})
+        snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
+
+        result = self.cloud.delete_snapshot(self.context,
+                                            snapshot_id=snapshot_id)
+        self.assertTrue(result)
+
+        db.volume_destroy(self.context, vol['id'])
+
     def test_describe_instances(self):
         """Makes sure describe_instances works and filters results."""
         inst1 = db.instance_create(self.context, {'reservation_id': 'a',
@@ -13,7 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 """
-Tests For Scheduler Host Filter Drivers.
+Tests For Scheduler Host Filters.
 """

 import json
@@ -31,7 +31,7 @@ class FakeZoneManager:


 class HostFilterTestCase(test.TestCase):
-    """Test case for host filter drivers."""
+    """Test case for host filters."""

     def _host_caps(self, multiplier):
         # Returns host capabilities in the following way:
@@ -57,8 +57,8 @@ class HostFilterTestCase(test.TestCase):
                 'host_name-label': 'xs-%s' % multiplier}

     def setUp(self):
-        self.old_flag = FLAGS.default_host_filter_driver
-        FLAGS.default_host_filter_driver = \
+        self.old_flag = FLAGS.default_host_filter
+        FLAGS.default_host_filter = \
             'nova.scheduler.host_filter.AllHostsFilter'
         self.instance_type = dict(name='tiny',
                                   memory_mb=50,
@@ -76,51 +76,52 @@ class HostFilterTestCase(test.TestCase):
         self.zone_manager.service_states = states

     def tearDown(self):
-        FLAGS.default_host_filter_driver = self.old_flag
+        FLAGS.default_host_filter = self.old_flag

-    def test_choose_driver(self):
-        # Test default driver ...
-        driver = host_filter.choose_driver()
-        self.assertEquals(driver._full_name(),
+    def test_choose_filter(self):
+        # Test default filter ...
+        hf = host_filter.choose_host_filter()
+        self.assertEquals(hf._full_name(),
                           'nova.scheduler.host_filter.AllHostsFilter')
-        # Test valid driver ...
-        driver = host_filter.choose_driver(
-            'nova.scheduler.host_filter.FlavorFilter')
-        self.assertEquals(driver._full_name(),
-                          'nova.scheduler.host_filter.FlavorFilter')
-        # Test invalid driver ...
+        # Test valid filter ...
+        hf = host_filter.choose_host_filter(
+            'nova.scheduler.host_filter.InstanceTypeFilter')
+        self.assertEquals(hf._full_name(),
+                          'nova.scheduler.host_filter.InstanceTypeFilter')
+        # Test invalid filter ...
         try:
-            host_filter.choose_driver('does not exist')
-            self.fail("Should not find driver")
-        except exception.SchedulerHostFilterDriverNotFound:
+            host_filter.choose_host_filter('does not exist')
+            self.fail("Should not find host filter.")
+        except exception.SchedulerHostFilterNotFound:
             pass

-    def test_all_host_driver(self):
-        driver = host_filter.AllHostsFilter()
-        cooked = driver.instance_type_to_filter(self.instance_type)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+    def test_all_host_filter(self):
+        hf = host_filter.AllHostsFilter()
+        cooked = hf.instance_type_to_filter(self.instance_type)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(10, len(hosts))
         for host, capabilities in hosts:
             self.assertTrue(host.startswith('host'))

-    def test_flavor_driver(self):
-        driver = host_filter.FlavorFilter()
+    def test_instance_type_filter(self):
+        hf = host_filter.InstanceTypeFilter()
         # filter all hosts that can support 50 ram and 500 disk
-        name, cooked = driver.instance_type_to_filter(self.instance_type)
-        self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+        name, cooked = hf.instance_type_to_filter(self.instance_type)
+        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
+                          name)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(6, len(hosts))
         just_hosts = [host for host, caps in hosts]
         just_hosts.sort()
         self.assertEquals('host05', just_hosts[0])
         self.assertEquals('host10', just_hosts[5])

-    def test_json_driver(self):
-        driver = host_filter.JsonFilter()
+    def test_json_filter(self):
+        hf = host_filter.JsonFilter()
         # filter all hosts that can support 50 ram and 500 disk
-        name, cooked = driver.instance_type_to_filter(self.instance_type)
+        name, cooked = hf.instance_type_to_filter(self.instance_type)
         self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(6, len(hosts))
         just_hosts = [host for host, caps in hosts]
         just_hosts.sort()
@@ -140,7 +141,7 @@ class HostFilterTestCase(test.TestCase):
             ]
         ]
         cooked = json.dumps(raw)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)

         self.assertEquals(5, len(hosts))
         just_hosts = [host for host, caps in hosts]
@@ -152,7 +153,7 @@ class HostFilterTestCase(test.TestCase):
             ['=', '$compute.host_memory_free', 30],
         ]
         cooked = json.dumps(raw)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)

         self.assertEquals(9, len(hosts))
         just_hosts = [host for host, caps in hosts]
@@ -162,7 +163,7 @@ class HostFilterTestCase(test.TestCase):

         raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
         cooked = json.dumps(raw)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)

         self.assertEquals(5, len(hosts))
         just_hosts = [host for host, caps in hosts]
@@ -174,35 +175,32 @@ class HostFilterTestCase(test.TestCase):
         raw = ['unknown command', ]
         cooked = json.dumps(raw)
         try:
-            driver.filter_hosts(self.zone_manager, cooked)
+            hf.filter_hosts(self.zone_manager, cooked)
             self.fail("Should give KeyError")
         except KeyError, e:
             pass

-        self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([])))
-        self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({})))
-        self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps(
+        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
+        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
+        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
             ['not', True, False, True, False]
         )))

         try:
-            driver.filter_hosts(self.zone_manager, json.dumps(
+            hf.filter_hosts(self.zone_manager, json.dumps(
                 'not', True, False, True, False
             ))
             self.fail("Should give KeyError")
         except KeyError, e:
             pass

-        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
-            ['=', '$foo', 100]
-        )))
-        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
-            ['=', '$.....', 100]
-        )))
-        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
-            ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
-        )))
+        self.assertFalse(hf.filter_hosts(self.zone_manager,
+                                         json.dumps(['=', '$foo', 100])))
+        self.assertFalse(hf.filter_hosts(self.zone_manager,
+                                         json.dumps(['=', '$.....', 100])))
+        self.assertFalse(hf.filter_hosts(self.zone_manager,
+                                         json.dumps(
+            ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))

-        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
-            ['=', {}, ['>', '$missing....foo']]
-        )))
+        self.assertFalse(hf.filter_hosts(self.zone_manager,
+                                         json.dumps(['=', {}, ['>', '$missing....foo']])))
@@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase):
         self.context = context.get_admin_context()

     @staticmethod
-    def _create_volume(size='0'):
+    def _create_volume(size='0', snapshot_id=None):
         """Create a volume object."""
         vol = {}
         vol['size'] = size
+        vol['snapshot_id'] = snapshot_id
        vol['user_id'] = 'fake'
        vol['project_id'] = 'fake'
        vol['availability_zone'] = FLAGS.storage_availability_zone
@@ -69,6 +70,25 @@ class VolumeTestCase(test.TestCase):
                           self.context,
                           volume_id)

+    def test_create_volume_from_snapshot(self):
+        """Test volume can be created from a snapshot."""
+        volume_src_id = self._create_volume()
+        self.volume.create_volume(self.context, volume_src_id)
+        snapshot_id = self._create_snapshot(volume_src_id)
+        self.volume.create_snapshot(self.context, volume_src_id, snapshot_id)
+        volume_dst_id = self._create_volume(0, snapshot_id)
+        self.volume.create_volume(self.context, volume_dst_id, snapshot_id)
+        self.assertEqual(volume_dst_id, db.volume_get(
+                             context.get_admin_context(),
+                             volume_dst_id).id)
+        self.assertEqual(snapshot_id, db.volume_get(
+                             context.get_admin_context(),
+                             volume_dst_id).snapshot_id)
+
+        self.volume.delete_volume(self.context, volume_dst_id)
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.volume.delete_volume(self.context, volume_src_id)
+
     def test_too_big_volume(self):
         """Ensure failure if a too large of a volume is requested."""
         # FIXME(vish): validation needs to move into the data layer in
@@ -176,6 +196,34 @@ class VolumeTestCase(test.TestCase):
         # This will allow us to test cross-node interactions
         pass

+    @staticmethod
+    def _create_snapshot(volume_id, size='0'):
+        """Create a snapshot object."""
+        snap = {}
+        snap['volume_size'] = size
+        snap['user_id'] = 'fake'
+        snap['project_id'] = 'fake'
+        snap['volume_id'] = volume_id
+        snap['status'] = "creating"
+        return db.snapshot_create(context.get_admin_context(), snap)['id']
+
+    def test_create_delete_snapshot(self):
+        """Test snapshot can be created and deleted."""
+        volume_id = self._create_volume()
+        self.volume.create_volume(self.context, volume_id)
+        snapshot_id = self._create_snapshot(volume_id)
+        self.volume.create_snapshot(self.context, volume_id, snapshot_id)
+        self.assertEqual(snapshot_id,
+                         db.snapshot_get(context.get_admin_context(),
+                                         snapshot_id).id)
+
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.assertRaises(exception.NotFound,
+                          db.snapshot_get,
+                          self.context,
+                          snapshot_id)
+        self.volume.delete_volume(self.context, volume_id)
+

 class DriverTestCase(test.TestCase):
     """Base Test class for Drivers."""
@@ -591,11 +591,29 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
         bob_shared = self.bob.compute_shared(alice_pub)
         self.assertEquals(alice_shared, bob_shared)

-    def test_encryption(self):
-        msg = "This is a top-secret message"
-        enc = self.alice.encrypt(msg)
+    def _test_encryption(self, message):
+        enc = self.alice.encrypt(message)
+        self.assertFalse(enc.endswith('\n'))
         dec = self.bob.decrypt(enc)
-        self.assertEquals(dec, msg)
+        self.assertEquals(dec, message)
+
+    def test_encrypt_simple_message(self):
+        self._test_encryption('This is a simple message.')
+
+    def test_encrypt_message_with_newlines_at_end(self):
+        self._test_encryption('This message has a newline at the end.\n')
+
+    def test_encrypt_many_newlines_at_end(self):
+        self._test_encryption('Message with lotsa newlines.\n\n\n')
+
+    def test_encrypt_newlines_inside_message(self):
+        self._test_encryption('Message\nwith\ninterior\nnewlines.')
+
+    def test_encrypt_with_leading_newlines(self):
+        self._test_encryption('\n\nMessage with leading newlines.')
+
+    def test_encrypt_really_long_message(self):
+        self._test_encryption(''.join(['abcd' for i in xrange(1024)]))

     def tearDown(self):
         super(XenAPIDiffieHellmanTestCase, self).tearDown()
@@ -116,4 +116,6 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         sched.set_zone_manager(zm)

         fake_context = {}
-        self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {})
+        self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
+                          fake_context, 1,
+                          dict(host_filter=None, instance_type={}))