Merge w/ trunk.
@@ -30,6 +30,7 @@ Gabe Westmaas <gabe.westmaas@rackspace.com>
 Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
 Hisaki Ohara <hisaki.ohara@intel.com>
 Ilya Alekseyev <ialekseev@griddynamics.com>
+Isaku Yamahata <yamahata@valinux.co.jp>
 Jason Koelker <jason@koelker.net>
 Jay Pipes <jaypipes@gmail.com>
 Jesse Andrews <anotherjesse@gmail.com>
@@ -83,6 +84,7 @@ Trey Morris <trey.morris@rackspace.com>
 Tushar Patil <tushar.vitthal.patil@gmail.com>
 Vasiliy Shlykov <vash@vasiliyshlykov.org>
 Vishvananda Ishaya <vishvananda@gmail.com>
+Vivek Y S <vivek.ys@gmail.com>
 William Wolf <throughnothing@gmail.com>
 Yoshiaki Tamura <yoshi@midokura.jp>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
@@ -1,4 +1,4 @@
-NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null)
+NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE}))
 export EC2_ACCESS_KEY="%(access)s:%(project)s"
 export EC2_SECRET_KEY="%(secret)s"
 export EC2_URL="%(ec2)s"
@@ -14,8 +14,8 @@
 # under the License.

 """
-Host Filter is a driver mechanism for requesting instance resources.
-Three drivers are included: AllHosts, Flavor & JSON. AllHosts just
+Host Filter is a mechanism for requesting instance resources.
+Three filters are included: AllHosts, Flavor & JSON. AllHosts just
 returns the full, unfiltered list of hosts. Flavor is a hard coded
 matching mechanism based on flavor criteria and JSON is an ad-hoc
 filter grammar.
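For orientation, the "ad-hoc filter grammar" this docstring mentions is exercised by the tests later in this commit: a query is a JSON-encoded list whose first element is an operator and whose '$service.capability' strings are capability lookups. A minimal sketch of building one such query (operator names and capability keys are taken from the tests in this diff; the exact supported operator set is whatever JsonFilter implements):

    import json

    # Hosts with at least 50 MB of free RAM and at least 500 GB of
    # available disk -- the same "50 ram and 500 disk" case the tests use.
    raw = ['and',
           ['>=', '$compute.host_memory_free', 50],
           ['>=', '$compute.disk_available', 500]]
    cooked = json.dumps(raw)  # JsonFilter.filter_hosts() takes the JSON string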
@@ -42,17 +42,18 @@ from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import utils
+from nova.scheduler import zone_aware_scheduler

 LOG = logging.getLogger('nova.scheduler.host_filter')

 FLAGS = flags.FLAGS
-flags.DEFINE_string('default_host_filter_driver',
+flags.DEFINE_string('default_host_filter',
                     'nova.scheduler.host_filter.AllHostsFilter',
-                    'Which driver to use for filtering hosts.')
+                    'Which filter to use for filtering hosts.')


 class HostFilter(object):
-    """Base class for host filter drivers."""
+    """Base class for host filters."""

     def instance_type_to_filter(self, instance_type):
         """Convert instance_type into a filter for most common use-case."""
@@ -63,14 +64,15 @@ class HostFilter(object):
         raise NotImplementedError()

     def _full_name(self):
-        """module.classname of the filter driver"""
+        """module.classname of the filter."""
         return "%s.%s" % (self.__module__, self.__class__.__name__)


 class AllHostsFilter(HostFilter):
-    """NOP host filter driver. Returns all hosts in ZoneManager.
+    """ NOP host filter. Returns all hosts in ZoneManager.
     This essentially does what the old Scheduler+Chance used
-    to give us."""
+    to give us.
+    """

     def instance_type_to_filter(self, instance_type):
         """Return anything to prevent base-class from raising
@@ -83,8 +85,8 @@ class AllHostsFilter(HostFilter):
                 for host, services in zone_manager.service_states.iteritems()]


-class FlavorFilter(HostFilter):
-    """HostFilter driver hard-coded to work with flavors."""
+class InstanceTypeFilter(HostFilter):
+    """HostFilter hard-coded to work with InstanceType records."""

     def instance_type_to_filter(self, instance_type):
         """Use instance_type to filter hosts."""
@@ -98,9 +100,10 @@ class FlavorFilter(HostFilter):
             capabilities = services.get('compute', {})
             host_ram_mb = capabilities['host_memory_free']
             disk_bytes = capabilities['disk_available']
-            if host_ram_mb >= instance_type['memory_mb'] and \
-                disk_bytes >= instance_type['local_gb']:
-                selected_hosts.append((host, capabilities))
+            spec_ram = instance_type['memory_mb']
+            spec_disk = instance_type['local_gb']
+            if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
+                selected_hosts.append((host, capabilities))
         return selected_hosts

     #host entries (currently) are like:
@@ -109,15 +112,15 @@ class FlavorFilter(HostFilter):
     #     'host_memory_total': 8244539392,
     #     'host_memory_overhead': 184225792,
     #     'host_memory_free': 3868327936,
-    #     'host_memory_free_computed': 3840843776},
-    #     'host_other-config': {},
+    #     'host_memory_free_computed': 3840843776,
+    #     'host_other_config': {},
     #     'host_ip_address': '192.168.1.109',
     #     'host_cpu_info': {},
     #     'disk_available': 32954957824,
     #     'disk_total': 50394562560,
-    #     'disk_used': 17439604736},
+    #     'disk_used': 17439604736,
     #     'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
-    #     'host_name-label': 'xs-mini'}
+    #     'host_name_label': 'xs-mini'}

     # instance_type table has:
     #name = Column(String(255), unique=True)
@@ -131,8 +134,9 @@ class FlavorFilter(HostFilter):


 class JsonFilter(HostFilter):
-    """Host Filter driver to allow simple JSON-based grammar for
-    selecting hosts."""
+    """Host Filter to allow simple JSON-based grammar for
+    selecting hosts.
+    """

     def _equals(self, args):
         """First term is == all the other terms."""
@@ -228,7 +232,8 @@ class JsonFilter(HostFilter):

     def _parse_string(self, string, host, services):
         """Strings prefixed with $ are capability lookups in the
-        form '$service.capability[.subcap*]'"""
+        form '$service.capability[.subcap*]'
+        """
         if not string:
             return None
         if string[0] != '$':
@@ -271,18 +276,48 @@ class JsonFilter(HostFilter):
         return hosts


-DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter]
+FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter]


-def choose_driver(driver_name=None):
-    """Since the caller may specify which driver to use we need
+def choose_host_filter(filter_name=None):
+    """Since the caller may specify which filter to use we need
     to have an authoritative list of what is permissible. This
-    function checks the driver name against a predefined set
-    of acceptable drivers."""
+    function checks the filter name against a predefined set
+    of acceptable filters.
+    """

-    if not driver_name:
-        driver_name = FLAGS.default_host_filter_driver
-    for driver in DRIVERS:
-        if "%s.%s" % (driver.__module__, driver.__name__) == driver_name:
-            return driver()
-    raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name)
+    if not filter_name:
+        filter_name = FLAGS.default_host_filter
+    for filter_class in FILTERS:
+        host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__)
+        if host_match == filter_name:
+            return filter_class()
+    raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
+
+
+class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+    """The HostFilterScheduler uses the HostFilter to filter
+    hosts for weighing. The particular filter used may be passed in
+    as an argument or the default will be used.
+
+    request_spec = {'filter': <Filter name>,
+                    'instance_type': <InstanceType dict>}
+    """
+
+    def filter_hosts(self, num, request_spec):
+        """Filter the full host list (from the ZoneManager)"""
+        filter_name = request_spec.get('filter', None)
+        host_filter = choose_host_filter(filter_name)
+
+        # TODO(sandy): We're only using InstanceType-based specs
+        # currently. Later we'll need to snoop for more detailed
+        # host filter requests.
+        instance_type = request_spec['instance_type']
+        name, query = host_filter.instance_type_to_filter(instance_type)
+        return host_filter.filter_hosts(self.zone_manager, query)
+
+    def weigh_hosts(self, num, request_spec, hosts):
+        """Derived classes must override this method and return
+        a lists of hosts in [{weight, hostname}] format.
+        """
+        return [dict(weight=1, hostname=host) for host, caps in hosts]
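Taken together, the renames above leave host_filter.py with a single call pattern, the same one the updated tests in this diff exercise. A hedged sketch (instance_type mirrors the dict the tests build; zone_manager stands in for the scheduler's ZoneManager, played by FakeZoneManager in the tests):

    from nova.scheduler import host_filter

    instance_type = dict(name='tiny', memory_mb=50, local_gb=500)

    # A full module.class name selects one of FILTERS; no argument means
    # FLAGS.default_host_filter; anything else raises
    # exception.SchedulerHostFilterNotFound.
    hf = host_filter.choose_host_filter(
        'nova.scheduler.host_filter.InstanceTypeFilter')
    name, query = hf.instance_type_to_filter(instance_type)
    hosts = hf.filter_hosts(zone_manager, query)  # [(hostname, capabilities)]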
@@ -22,7 +22,9 @@ across zones. There are two expansion points to this class for:

 import operator

+from nova import db
 from nova import log as logging
+from nova import rpc
 from nova.scheduler import api
 from nova.scheduler import driver

@@ -36,7 +38,7 @@ class ZoneAwareScheduler(driver.Scheduler):
         """Call novaclient zone method. Broken out for testing."""
         return api.call_zone_method(context, method, specs=specs)

-    def schedule_run_instance(self, context, topic='compute', specs={},
+    def schedule_run_instance(self, context, instance_id, request_spec,
                               *args, **kwargs):
         """This method is called from nova.compute.api to provision
         an instance. However we need to look at the parameters being
@@ -44,56 +46,83 @@ class ZoneAwareScheduler(driver.Scheduler):
         1. Create a Build Plan and then provision, or
         2. Use the Build Plan information in the request parameters
            to simply create the instance (either in this zone or
-           a child zone)."""
+           a child zone).
+        """

-        if 'blob' in specs:
-            return self.provision_instance(context, topic, specs)
+        # TODO(sandy): We'll have to look for richer specs at some point.
+
+        if 'blob' in request_spec:
+            self.provision_resource(context, request_spec, instance_id, kwargs)
+            return None

         # Create build plan and provision ...
-        build_plan = self.select(context, specs)
+        build_plan = self.select(context, request_spec)
+        if not build_plan:
+            raise driver.NoValidHost(_('No hosts were available'))
+
         for item in build_plan:
-            self.provision_instance(context, topic, item)
+            self.provision_resource(context, item, instance_id, kwargs)

-    def provision_instance(context, topic, item):
-        """Create the requested instance in this Zone or a child zone."""
-        pass
+        # Returning None short-circuits the routing to Compute (since
+        # we've already done it here)
+        return None

-    def select(self, context, *args, **kwargs):
+    def provision_resource(self, context, item, instance_id, kwargs):
+        """Create the requested resource in this Zone or a child zone."""
+        if "hostname" in item:
+            host = item['hostname']
+            kwargs['instance_id'] = instance_id
+            rpc.cast(context,
+                     db.queue_get_for(context, "compute", host),
+                     {"method": "run_instance",
+                      "args": kwargs})
+            LOG.debug(_("Casted to compute %(host)s for run_instance")
+                      % locals())
+        else:
+            # TODO(sandy) Provision in child zone ...
+            LOG.warning(_("Provision to Child Zone not supported (yet)"))
+            pass
+
+    def select(self, context, request_spec, *args, **kwargs):
         """Select returns a list of weights and zone/host information
         corresponding to the best hosts to service the request. Any
         child zone information has been encrypted so as not to reveal
-        anything about the children."""
-        return self._schedule(context, "compute", *args, **kwargs)
+        anything about the children.
+        """
+        return self._schedule(context, "compute", request_spec,
+                              *args, **kwargs)

-    def schedule(self, context, topic, *args, **kwargs):
+    # TODO(sandy): We're only focused on compute instances right now,
+    # so we don't implement the default "schedule()" method required
+    # of Schedulers.
+    def schedule(self, context, topic, request_spec, *args, **kwargs):
         """The schedule() contract requires we return the one
         best-suited host for this request.
         """
-        res = self._schedule(context, topic, *args, **kwargs)
-        # TODO(sirp): should this be a host object rather than a weight-dict?
-        if not res:
-            raise driver.NoValidHost(_('No hosts were available'))
-        return res[0]
+        raise driver.NoValidHost(_('No hosts were available'))

-    def _schedule(self, context, topic, *args, **kwargs):
+    def _schedule(self, context, topic, request_spec, *args, **kwargs):
         """Returns a list of hosts that meet the required specs,
         ordered by their fitness.
         """

-        #TODO(sandy): extract these from args.
+        if topic != "compute":
+            raise NotImplemented(_("Zone Aware Scheduler only understands "
+                                   "Compute nodes (for now)"))
+
+        #TODO(sandy): how to infer this from OS API params?
         num_instances = 1
-        specs = {}

         # Filter local hosts based on requirements ...
-        host_list = self.filter_hosts(num_instances, specs)
+        host_list = self.filter_hosts(num_instances, request_spec)

         # then weigh the selected hosts.
         # weighted = [{weight=weight, name=hostname}, ...]
-        weighted = self.weigh_hosts(num_instances, specs, host_list)
+        weighted = self.weigh_hosts(num_instances, request_spec, host_list)

         # Next, tack on the best weights from the child zones ...
         child_results = self._call_zone_method(context, "select",
-                                               specs=specs)
+                                               specs=request_spec)
         for child_zone, result in child_results:
             for weighting in result:
                 # Remember the child_zone so we can get back to
@@ -108,12 +137,14 @@ class ZoneAwareScheduler(driver.Scheduler):
         weighted.sort(key=operator.itemgetter('weight'))
         return weighted

-    def filter_hosts(self, num, specs):
+    def filter_hosts(self, num, request_spec):
         """Derived classes must override this method and return
-        a list of hosts in [(hostname, capability_dict)] format."""
+        a list of hosts in [(hostname, capability_dict)] format.
+        """
         raise NotImplemented()

-    def weigh_hosts(self, num, specs, hosts):
+    def weigh_hosts(self, num, request_spec, hosts):
         """Derived classes must override this method and return
-        a lists of hosts in [{weight, hostname}] format."""
+        a lists of hosts in [{weight, hostname}] format.
+        """
         raise NotImplemented()
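The net effect of these scheduler changes is that a single request_spec dict now flows from schedule_run_instance() through select()/_schedule() down to filter_hosts() and weigh_hosts(), replacing the old topic/specs pair. A rough sketch of the new calling convention (scheduler, context and instance_id are placeholders; the spec shape follows the HostFilterScheduler docstring above):

    request_spec = {'filter': 'nova.scheduler.host_filter.InstanceTypeFilter',
                    'instance_type': instance_type}

    # Filters and weighs local hosts, then folds in child-zone weights.
    build_plan = scheduler.select(context, request_spec)

    # Walks the build plan and rpc.casts run_instance to each chosen host
    # via provision_resource(); returns None so the normal routing to
    # Compute is short-circuited.
    scheduler.schedule_run_instance(context, instance_id, request_spec)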
@@ -169,6 +169,25 @@ class CloudTestCase(test.TestCase):
         db.volume_destroy(self.context, vol1['id'])
         db.volume_destroy(self.context, vol2['id'])

+    def test_create_volume_from_snapshot(self):
+        """Makes sure create_volume works when we specify a snapshot."""
+        vol = db.volume_create(self.context, {'size': 1})
+        snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
+                                                 'volume_size': vol['size'],
+                                                 'status': "available"})
+        snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
+
+        result = self.cloud.create_volume(self.context,
+                                          snapshot_id=snapshot_id)
+        volume_id = result['volumeId']
+        result = self.cloud.describe_volumes(self.context)
+        self.assertEqual(len(result['volumeSet']), 2)
+        self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id)
+
+        db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id))
+        db.snapshot_destroy(self.context, snap['id'])
+        db.volume_destroy(self.context, vol['id'])
+
     def test_describe_availability_zones(self):
         """Makes sure describe_availability_zones works and filters results."""
         service1 = db.service_create(self.context, {'host': 'host1_zones',
@@ -186,6 +205,52 @@ class CloudTestCase(test.TestCase):
         db.service_destroy(self.context, service1['id'])
         db.service_destroy(self.context, service2['id'])

+    def test_describe_snapshots(self):
+        """Makes sure describe_snapshots works and filters results."""
+        vol = db.volume_create(self.context, {})
+        snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']})
+        snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']})
+        result = self.cloud.describe_snapshots(self.context)
+        self.assertEqual(len(result['snapshotSet']), 2)
+        snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x')
+        result = self.cloud.describe_snapshots(self.context,
+                                               snapshot_id=[snapshot_id])
+        self.assertEqual(len(result['snapshotSet']), 1)
+        self.assertEqual(
+            ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']),
+            snap2['id'])
+        db.snapshot_destroy(self.context, snap1['id'])
+        db.snapshot_destroy(self.context, snap2['id'])
+        db.volume_destroy(self.context, vol['id'])
+
+    def test_create_snapshot(self):
+        """Makes sure create_snapshot works."""
+        vol = db.volume_create(self.context, {'status': "available"})
+        volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
+
+        result = self.cloud.create_snapshot(self.context,
+                                            volume_id=volume_id)
+        snapshot_id = result['snapshotId']
+        result = self.cloud.describe_snapshots(self.context)
+        self.assertEqual(len(result['snapshotSet']), 1)
+        self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
+
+        db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id))
+        db.volume_destroy(self.context, vol['id'])
+
+    def test_delete_snapshot(self):
+        """Makes sure delete_snapshot works."""
+        vol = db.volume_create(self.context, {'status': "available"})
+        snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
+                                                 'status': "available"})
+        snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
+
+        result = self.cloud.delete_snapshot(self.context,
+                                            snapshot_id=snapshot_id)
+        self.assertTrue(result)
+
+        db.volume_destroy(self.context, vol['id'])
+
     def test_describe_instances(self):
         """Makes sure describe_instances works and filters results."""
         inst1 = db.instance_create(self.context, {'reservation_id': 'a',
@@ -13,7 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 """
-Tests For Scheduler Host Filter Drivers.
+Tests For Scheduler Host Filters.
 """

 import json
@@ -31,7 +31,7 @@ class FakeZoneManager:


 class HostFilterTestCase(test.TestCase):
-    """Test case for host filter drivers."""
+    """Test case for host filters."""

     def _host_caps(self, multiplier):
         # Returns host capabilities in the following way:
@@ -57,8 +57,8 @@ class HostFilterTestCase(test.TestCase):
                 'host_name-label': 'xs-%s' % multiplier}

     def setUp(self):
-        self.old_flag = FLAGS.default_host_filter_driver
-        FLAGS.default_host_filter_driver = \
+        self.old_flag = FLAGS.default_host_filter
+        FLAGS.default_host_filter = \
             'nova.scheduler.host_filter.AllHostsFilter'
         self.instance_type = dict(name='tiny',
                                   memory_mb=50,
@@ -76,51 +76,52 @@ class HostFilterTestCase(test.TestCase):
         self.zone_manager.service_states = states

     def tearDown(self):
-        FLAGS.default_host_filter_driver = self.old_flag
+        FLAGS.default_host_filter = self.old_flag

-    def test_choose_driver(self):
-        # Test default driver ...
-        driver = host_filter.choose_driver()
-        self.assertEquals(driver._full_name(),
+    def test_choose_filter(self):
+        # Test default filter ...
+        hf = host_filter.choose_host_filter()
+        self.assertEquals(hf._full_name(),
             'nova.scheduler.host_filter.AllHostsFilter')
-        # Test valid driver ...
-        driver = host_filter.choose_driver(
-            'nova.scheduler.host_filter.FlavorFilter')
-        self.assertEquals(driver._full_name(),
-            'nova.scheduler.host_filter.FlavorFilter')
-        # Test invalid driver ...
+        # Test valid filter ...
+        hf = host_filter.choose_host_filter(
+            'nova.scheduler.host_filter.InstanceTypeFilter')
+        self.assertEquals(hf._full_name(),
+            'nova.scheduler.host_filter.InstanceTypeFilter')
+        # Test invalid filter ...
         try:
-            host_filter.choose_driver('does not exist')
-            self.fail("Should not find driver")
-        except exception.SchedulerHostFilterDriverNotFound:
+            host_filter.choose_host_filter('does not exist')
+            self.fail("Should not find host filter.")
+        except exception.SchedulerHostFilterNotFound:
             pass

-    def test_all_host_driver(self):
-        driver = host_filter.AllHostsFilter()
-        cooked = driver.instance_type_to_filter(self.instance_type)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+    def test_all_host_filter(self):
+        hf = host_filter.AllHostsFilter()
+        cooked = hf.instance_type_to_filter(self.instance_type)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(10, len(hosts))
         for host, capabilities in hosts:
             self.assertTrue(host.startswith('host'))

-    def test_flavor_driver(self):
-        driver = host_filter.FlavorFilter()
+    def test_instance_type_filter(self):
+        hf = host_filter.InstanceTypeFilter()
         # filter all hosts that can support 50 ram and 500 disk
-        name, cooked = driver.instance_type_to_filter(self.instance_type)
-        self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+        name, cooked = hf.instance_type_to_filter(self.instance_type)
+        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
+                          name)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(6, len(hosts))
         just_hosts = [host for host, caps in hosts]
         just_hosts.sort()
         self.assertEquals('host05', just_hosts[0])
         self.assertEquals('host10', just_hosts[5])

-    def test_json_driver(self):
-        driver = host_filter.JsonFilter()
+    def test_json_filter(self):
+        hf = host_filter.JsonFilter()
         # filter all hosts that can support 50 ram and 500 disk
-        name, cooked = driver.instance_type_to_filter(self.instance_type)
+        name, cooked = hf.instance_type_to_filter(self.instance_type)
         self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(6, len(hosts))
         just_hosts = [host for host, caps in hosts]
         just_hosts.sort()
@@ -140,7 +141,7 @@ class HostFilterTestCase(test.TestCase):
             ]
         ]
         cooked = json.dumps(raw)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)

         self.assertEquals(5, len(hosts))
         just_hosts = [host for host, caps in hosts]
@@ -152,7 +153,7 @@ class HostFilterTestCase(test.TestCase):
             ['=', '$compute.host_memory_free', 30],
         ]
         cooked = json.dumps(raw)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)

         self.assertEquals(9, len(hosts))
         just_hosts = [host for host, caps in hosts]
@@ -162,7 +163,7 @@ class HostFilterTestCase(test.TestCase):

         raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
         cooked = json.dumps(raw)
-        hosts = driver.filter_hosts(self.zone_manager, cooked)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)

         self.assertEquals(5, len(hosts))
         just_hosts = [host for host, caps in hosts]
@@ -174,35 +175,32 @@ class HostFilterTestCase(test.TestCase):
         raw = ['unknown command', ]
         cooked = json.dumps(raw)
         try:
-            driver.filter_hosts(self.zone_manager, cooked)
+            hf.filter_hosts(self.zone_manager, cooked)
             self.fail("Should give KeyError")
         except KeyError, e:
             pass

-        self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([])))
-        self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({})))
-        self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps(
+        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
+        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
+        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
             ['not', True, False, True, False]
         )))

         try:
-            driver.filter_hosts(self.zone_manager, json.dumps(
+            hf.filter_hosts(self.zone_manager, json.dumps(
                 'not', True, False, True, False
             ))
             self.fail("Should give KeyError")
         except KeyError, e:
             pass

-        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
-            ['=', '$foo', 100]
-        )))
-        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
-            ['=', '$.....', 100]
-        )))
-        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
-            ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
-        )))
+        self.assertFalse(hf.filter_hosts(self.zone_manager,
+                                         json.dumps(['=', '$foo', 100])))
+        self.assertFalse(hf.filter_hosts(self.zone_manager,
+                                         json.dumps(['=', '$.....', 100])))
+        self.assertFalse(hf.filter_hosts(self.zone_manager,
+                                         json.dumps(
+            ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))

-        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
-            ['=', {}, ['>', '$missing....foo']]
-        )))
+        self.assertFalse(hf.filter_hosts(self.zone_manager,
+                                         json.dumps(
+            ['=', {}, ['>', '$missing....foo']])))
@@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase):
         self.context = context.get_admin_context()

     @staticmethod
-    def _create_volume(size='0'):
+    def _create_volume(size='0', snapshot_id=None):
         """Create a volume object."""
         vol = {}
         vol['size'] = size
+        vol['snapshot_id'] = snapshot_id
         vol['user_id'] = 'fake'
         vol['project_id'] = 'fake'
         vol['availability_zone'] = FLAGS.storage_availability_zone
@@ -69,6 +70,25 @@ class VolumeTestCase(test.TestCase):
                           self.context,
                           volume_id)

+    def test_create_volume_from_snapshot(self):
+        """Test volume can be created from a snapshot."""
+        volume_src_id = self._create_volume()
+        self.volume.create_volume(self.context, volume_src_id)
+        snapshot_id = self._create_snapshot(volume_src_id)
+        self.volume.create_snapshot(self.context, volume_src_id, snapshot_id)
+        volume_dst_id = self._create_volume(0, snapshot_id)
+        self.volume.create_volume(self.context, volume_dst_id, snapshot_id)
+        self.assertEqual(volume_dst_id, db.volume_get(
+            context.get_admin_context(),
+            volume_dst_id).id)
+        self.assertEqual(snapshot_id, db.volume_get(
+            context.get_admin_context(),
+            volume_dst_id).snapshot_id)
+
+        self.volume.delete_volume(self.context, volume_dst_id)
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.volume.delete_volume(self.context, volume_src_id)
+
     def test_too_big_volume(self):
         """Ensure failure if a too large of a volume is requested."""
         # FIXME(vish): validation needs to move into the data layer in
@@ -176,6 +196,34 @@ class VolumeTestCase(test.TestCase):
         # This will allow us to test cross-node interactions
         pass

+    @staticmethod
+    def _create_snapshot(volume_id, size='0'):
+        """Create a snapshot object."""
+        snap = {}
+        snap['volume_size'] = size
+        snap['user_id'] = 'fake'
+        snap['project_id'] = 'fake'
+        snap['volume_id'] = volume_id
+        snap['status'] = "creating"
+        return db.snapshot_create(context.get_admin_context(), snap)['id']
+
+    def test_create_delete_snapshot(self):
+        """Test snapshot can be created and deleted."""
+        volume_id = self._create_volume()
+        self.volume.create_volume(self.context, volume_id)
+        snapshot_id = self._create_snapshot(volume_id)
+        self.volume.create_snapshot(self.context, volume_id, snapshot_id)
+        self.assertEqual(snapshot_id,
+                         db.snapshot_get(context.get_admin_context(),
+                                         snapshot_id).id)
+
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.assertRaises(exception.NotFound,
+                          db.snapshot_get,
+                          self.context,
+                          snapshot_id)
+        self.volume.delete_volume(self.context, volume_id)
+

 class DriverTestCase(test.TestCase):
     """Base Test class for Drivers."""
@@ -591,11 +591,29 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
         bob_shared = self.bob.compute_shared(alice_pub)
         self.assertEquals(alice_shared, bob_shared)

-    def test_encryption(self):
-        msg = "This is a top-secret message"
-        enc = self.alice.encrypt(msg)
+    def _test_encryption(self, message):
+        enc = self.alice.encrypt(message)
+        self.assertFalse(enc.endswith('\n'))
         dec = self.bob.decrypt(enc)
-        self.assertEquals(dec, msg)
+        self.assertEquals(dec, message)
+
+    def test_encrypt_simple_message(self):
+        self._test_encryption('This is a simple message.')
+
+    def test_encrypt_message_with_newlines_at_end(self):
+        self._test_encryption('This message has a newline at the end.\n')
+
+    def test_encrypt_many_newlines_at_end(self):
+        self._test_encryption('Message with lotsa newlines.\n\n\n')
+
+    def test_encrypt_newlines_inside_message(self):
+        self._test_encryption('Message\nwith\ninterior\nnewlines.')
+
+    def test_encrypt_with_leading_newlines(self):
+        self._test_encryption('\n\nMessage with leading newlines.')
+
+    def test_encrypt_really_long_message(self):
+        self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
+
     def tearDown(self):
         super(XenAPIDiffieHellmanTestCase, self).tearDown()
@@ -116,4 +116,6 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         sched.set_zone_manager(zm)

         fake_context = {}
-        self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {})
+        self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
+                          fake_context, 1,
+                          dict(host_filter=None, instance_type={}))