Burn-in support: allow specifying, via availability_zone, a specific host to run
instances and volumes on.
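For context: the scheduler changes below treat an availability_zone of the form zone:host (for example the nova:host1 value used in the tests) as a request to pin the instance or volume to that host. A minimal, illustrative sketch of the string handling they rely on:

# Illustrative only -- how a "zone:host" specification splits.
zone_spec = 'nova:host1'
zone, _sep, host = zone_spec.partition(':')
print zone, host   # nova host1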
@@ -53,6 +53,7 @@
CLI interface for nova management.
"""

import datetime
import gettext
import logging
import os

@@ -432,6 +433,52 @@ class NetworkCommands(object):
                              int(network_size), int(vlan_start),
                              int(vpn_start))


class ServiceCommands(object):
    """Enable and disable running services"""

    def list(self, host=None, service=None):
        """Show a list of all running services. Filter by host & service name.
        args: [host] [service]"""
        ctxt = context.get_admin_context()
        now = datetime.datetime.utcnow()
        services = db.service_get_all(ctxt)
        if host:
            services = [s for s in services if s['host'] == host]
        if service:
            services = [s for s in services if s['binary'] == service]
        for svc in services:
            delta = now - (svc['updated_at'] or svc['created_at'])
            alive = (delta.seconds <= 15)
            art = (alive and ":-)") or "XXX"
            active = 'enabled'
            if svc['disabled']:
                active = 'disabled'
            print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
                                              active, art,
                                              svc['updated_at'])

    def enable(self, host, service):
        """Enable scheduling for a service
        args: host service"""
        ctxt = context.get_admin_context()
        svc = db.service_get_by_args(ctxt, host, service)
        if not svc:
            print "Unable to find service"
            return
        db.service_update(ctxt, svc['id'], {'disabled': False})

    def disable(self, host, service):
        """Disable scheduling for a service
        args: host service"""
        ctxt = context.get_admin_context()
        svc = db.service_get_by_args(ctxt, host, service)
        if not svc:
            print "Unable to find service"
            return
        db.service_update(ctxt, svc['id'], {'disabled': True})


CATEGORIES = [
    ('user', UserCommands),
    ('project', ProjectCommands),

@@ -439,7 +486,8 @@ CATEGORIES = [
    ('shell', ShellCommands),
    ('vpn', VpnCommands),
    ('floating', FloatingIpCommands),
-    ('network', NetworkCommands)]
+    ('network', NetworkCommands),
+    ('service', ServiceCommands)]


def lazy_match(name, key_value_tuples):

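With the new category registered, this backs `nova-manage service list [host] [service]`, `service enable <host> <service>` and `service disable <host> <service>`. As a rough, self-contained sketch of how the list output is derived (the field names and the 15-second threshold mirror the code above; the literal dict stands in for a service row from the database):

import datetime

def service_status(svc, now=None):
    # Mirrors the liveness check above; note that timedelta.seconds ignores
    # any whole days in the delta.
    now = now or datetime.datetime.utcnow()
    delta = now - (svc['updated_at'] or svc['created_at'])
    alive = (delta.seconds <= 15)
    art = (alive and ":-)") or "XXX"
    active = (svc['disabled'] and 'disabled') or 'enabled'
    return "%-8s %s" % (active, art)

print service_status({'updated_at': datetime.datetime.utcnow(),
                      'created_at': None,
                      'disabled': False})   # enabled  :-)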
@@ -189,9 +189,45 @@ class CloudController(object):
        return data

    def describe_availability_zones(self, context, **kwargs):
        if ('zone_name' in kwargs and
            'verbose' in kwargs['zone_name'] and
            context.is_admin):
            return self._describe_availability_zones_verbose(context,
                                                              **kwargs)
        else:
            return self._describe_availability_zones(context, **kwargs)

    def _describe_availability_zones(self, context, **kwargs):
        return {'availabilityZoneInfo': [{'zoneName': 'nova',
                                          'zoneState': 'available'}]}

    def _describe_availability_zones_verbose(self, context, **kwargs):
        rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
                                        'zoneState': 'available'}]}

        services = db.service_get_all(context)
        now = db.get_time()
        hosts = []
        for host in [service['host'] for service in services]:
            if not host in hosts:
                hosts.append(host)
        for host in hosts:
            rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host,
                                               'zoneState': ''})
            hsvcs = [service for service in services if service['host'] == host]
            for svc in hsvcs:
                delta = now - (svc['updated_at'] or svc['created_at'])
                alive = (delta.seconds <= FLAGS.service_down_time)
                art = (alive and ":-)") or "XXX"
                active = 'enabled'
                if svc['disabled']:
                    active = 'disabled'
                rv['availabilityZoneInfo'].append({
                    'zoneName': '| |- %s' % svc['binary'],
                    'zoneState': '%s %s %s' % (active, art,
                                               svc['updated_at'])})
        return rv

    def describe_regions(self, context, region_name=None, **kwargs):
        if FLAGS.region_list:
            regions = []

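A hypothetical illustration of the verbose structure this builds, with hosts and their services rendered as a small tree inside zoneName (the host name and timestamp here are invented for display):

# Hypothetical sample -- 'host1' and the timestamp are made up.
rv = {'availabilityZoneInfo': [
    {'zoneName': 'nova', 'zoneState': 'available'},
    {'zoneName': '|- host1', 'zoneState': ''},
    {'zoneName': '| |- nova-compute',
     'zoneState': 'enabled :-) 2010-12-01 00:00:00'},
]}
for zone in rv['availabilityZoneInfo']:
    print zone['zoneName'], zone['zoneState']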
@@ -757,6 +793,8 @@ class CloudController(object):
            description=kwargs.get('display_description'),
            key_name=kwargs.get('key_name'),
            security_group=kwargs.get('security_group'),
            availability_zone=kwargs.get('placement', {}).get(
                'AvailabilityZone'),
            generate_hostname=internal_id_to_ec2_id)
        return self._format_run_instances(context,
                                          instances[0]['reservation_id'])

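A quick sketch of the nested lookup used above for the EC2 Placement parameter; 'nova:host1' is only an example value, and a request with no placement entry simply yields None:

kwargs = {'placement': {'AvailabilityZone': 'nova:host1'}}
print kwargs.get('placement', {}).get('AvailabilityZone')   # nova:host1

kwargs = {}
print kwargs.get('placement', {}).get('AvailabilityZone')   # None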
@@ -57,6 +57,7 @@ class ComputeAPI(base.Base):
                               max_count=1, kernel_id=None, ramdisk_id=None,
                               display_name='', description='', key_name=None,
                               key_data=None, security_group='default',
                               availability_zone=None,
                               generate_hostname=generate_default_hostname):
        """Create the number of instances requested if quote and
        other arguments check out ok."""
@@ -121,7 +122,8 @@ class ComputeAPI(base.Base):
                'display_name': display_name,
                'display_description': description,
                'key_name': key_name,
-                'key_data': key_data}
+                'key_data': key_data,
+                'availability_zone': availability_zone}

        elevated = context.elevated()
        instances = []

@@ -148,7 +148,7 @@ class Service(BASE, NovaBase):
    binary = Column(String(255))
    topic = Column(String(255))
    report_count = Column(Integer, nullable=False, default=0)
-    disabled = Column(Boolean, default=False)
+    disabled = Column(Boolean, default=True)


class Instance(BASE, NovaBase):

@@ -210,6 +210,8 @@ class Instance(BASE, NovaBase):
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)

    availability_zone = Column(String(255))

    # User editable field for display in user-facing UIs
    display_name = Column(String(255))
    display_description = Column(String(255))

@@ -37,6 +37,11 @@ class NoValidHost(exception.Error):
    pass


class WillNotSchedule(exception.Error):
    """The specified host is not up or doesn't exist."""
    pass


class Scheduler(object):
    """The base class that all Scheduler clases should inherit from."""

@@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler):
    def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        instance_ref = db.instance_get(context, instance_id)
        if instance_ref['availability_zone'] and context.is_admin:
            zone, _x, host = instance_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-compute')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule("Host %s is not alive" % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.instance_update(context, instance_id, {'host': host,
                                                      'scheduled_at': now})
            return host
        results = db.service_get_all_compute_sorted(context)
        for result in results:
            (service, instance_cores) = result
@@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler):
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
        if (':' in volume_ref['availability_zone']) and context.is_admin:
            zone, _x, host = volume_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule("Host %s not available" % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host
        results = db.service_get_all_volume_sorted(context)
        for result in results:
            (service, volume_gigabytes) = result

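One detail worth noting about the two guards above: the instance path only requires that availability_zone is set (and that the caller is an admin), while the volume path additionally requires a ':' in the value. partition(':') only yields a host when a colon is actually present, as this small check shows:

for spec in ('nova:host1', 'nova'):
    zone, _sep, host = spec.partition(':')
    print repr(spec), '->', repr(zone), repr(host)
# 'nova:host1' -> 'nova' 'host1'
# 'nova' -> 'nova' ''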
@@ -19,6 +19,8 @@
Tests For Scheduler
"""

import datetime

from nova import context
from nova import db
from nova import flags
@@ -93,7 +95,7 @@ class SimpleDriverTestCase(test.TestCase):
        self.manager.delete_user(self.user)
        self.manager.delete_project(self.project)

-    def _create_instance(self):
+    def _create_instance(self, **kwargs):
        """Create a test instance"""
        inst = {}
        inst['image_id'] = 'ami-test'
@@ -104,6 +106,7 @@ class SimpleDriverTestCase(test.TestCase):
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        inst['vcpus'] = 1
        inst['availability_zone'] = kwargs.get('availability_zone', None)
        return db.instance_create(self.context, inst)['id']

    def _create_volume(self):
@@ -112,10 +115,11 @@ class SimpleDriverTestCase(test.TestCase):
        vol['image_id'] = 'ami-test'
        vol['reservation_id'] = 'r-fakeres'
        vol['size'] = 1
        vol['availability_zone'] = 'test'
        return db.volume_create(self.context, vol)['id']

-    def test_hosts_are_up(self):
-        """Ensures driver can find the hosts that are up"""
+    def test_doesnt_report_disabled_hosts_as_up(self):
+        """Ensures driver doesn't find hosts before they are enabled"""
        # NOTE(vish): constructing service without create method
        # because we are going to use it without queue
        compute1 = service.Service('host1',

@@ -129,7 +133,30 @@ class SimpleDriverTestCase(test.TestCase):
                                   FLAGS.compute_manager)
        compute2.start()
        hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
-        self.assertEqual(len(hosts), 2)
+        self.assertEqual(0, len(hosts))
        compute1.kill()
        compute2.kill()

    def test_reports_enabled_hosts_as_up(self):
        """Ensures driver can find the hosts that are up"""
        # NOTE(vish): constructing service without create method
        # because we are going to use it without queue
        compute1 = service.Service('host1',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute1.start()
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute2.start()
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
        db.service_update(self.context, s1['id'], {'disabled': False})
        db.service_update(self.context, s2['id'], {'disabled': False})
        hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
        self.assertEqual(2, len(hosts))
        compute1.kill()
        compute2.kill()

@@ -145,6 +172,10 @@ class SimpleDriverTestCase(test.TestCase):
                                   'compute',
                                   FLAGS.compute_manager)
        compute2.start()
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
        db.service_update(self.context, s1['id'], {'disabled': False})
        db.service_update(self.context, s2['id'], {'disabled': False})
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance()

@@ -156,6 +187,67 @@ class SimpleDriverTestCase(test.TestCase):
        compute1.kill()
        compute2.kill()

    def test_specific_host_gets_instance(self):
        """Ensures if you set availability_zone it launches on that zone"""
        compute1 = service.Service('host1',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute1.start()
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute2.start()
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
        db.service_update(self.context, s1['id'], {'disabled': False})
        db.service_update(self.context, s2['id'], {'disabled': False})
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance(availability_zone='nova:host1')
        host = self.scheduler.driver.schedule_run_instance(self.context,
                                                           instance_id2)
        self.assertEqual('host1', host)
        compute1.terminate_instance(self.context, instance_id1)
        db.instance_destroy(self.context, instance_id2)
        compute1.kill()
        compute2.kill()

    def test_wont_sechedule_if_specified_host_is_down(self):
        compute1 = service.Service('host1',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute1.start()
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        now = datetime.datetime.utcnow()
        delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
        past = now - delta
        db.service_update(self.context, s1['id'], {'disabled': False,
                                                   'updated_at': past})
        instance_id2 = self._create_instance(availability_zone='nova:host1')
        self.assertRaises(driver.WillNotSchedule,
                          self.scheduler.driver.schedule_run_instance,
                          self.context,
                          instance_id2)
        db.instance_destroy(self.context, instance_id2)
        compute1.kill()

    def test_will_schedule_on_disabled_host_if_specified(self):
        compute1 = service.Service('host1',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute1.start()
        db.service_get_by_args(self.context, 'host1', 'nova-compute')
        instance_id2 = self._create_instance(availability_zone='nova:host1')
        host = self.scheduler.driver.schedule_run_instance(self.context,
                                                           instance_id2)
        self.assertEqual('host1', host)
        db.instance_destroy(self.context, instance_id2)
        compute1.kill()

    def test_too_many_cores(self):
        """Ensures we don't go over max cores"""
        compute1 = service.Service('host1',
@@ -168,6 +260,10 @@ class SimpleDriverTestCase(test.TestCase):
                                   'compute',
                                   FLAGS.compute_manager)
        compute2.start()
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
        db.service_update(self.context, s1['id'], {'disabled': False})
        db.service_update(self.context, s2['id'], {'disabled': False})
        instance_ids1 = []
        instance_ids2 = []
        for index in xrange(FLAGS.max_cores):
@@ -201,6 +297,10 @@ class SimpleDriverTestCase(test.TestCase):
                                   'volume',
                                   FLAGS.volume_manager)
        volume2.start()
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-volume')
        s2 = db.service_get_by_args(self.context, 'host2', 'nova-volume')
        db.service_update(self.context, s1['id'], {'disabled': False})
        db.service_update(self.context, s2['id'], {'disabled': False})
        volume_id1 = self._create_volume()
        volume1.create_volume(self.context, volume_id1)
        volume_id2 = self._create_volume()
@@ -224,6 +324,10 @@ class SimpleDriverTestCase(test.TestCase):
                                   'volume',
                                   FLAGS.volume_manager)
        volume2.start()
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-volume')
        s2 = db.service_get_by_args(self.context, 'host2', 'nova-volume')
        db.service_update(self.context, s1['id'], {'disabled': False})
        db.service_update(self.context, s2['id'], {'disabled': False})
        volume_ids1 = []
        volume_ids2 = []
        for index in xrange(FLAGS.max_gigabytes):