Merge "Remove availability_zones from service table"
@@ -70,6 +70,7 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
 gettext.install('nova', unicode=1)
 
 from nova.api.ec2 import ec2utils
+from nova import availability_zones
 from nova.compute import instance_types
 from nova.compute import rpcapi as compute_rpcapi
 from nova import config

@@ -626,6 +627,7 @@ class ServiceCommands(object):
         ctxt = context.get_admin_context()
         now = timeutils.utcnow()
         services = db.service_get_all(ctxt)
+        services = availability_zones.set_availability_zones(ctxt, services)
         if host:
             services = [s for s in services if s['host'] == host]
         if service:

@@ -741,6 +743,7 @@ class HostCommands(object):
         ctxt = context.get_admin_context()
         now = timeutils.utcnow()
         services = db.service_get_all(ctxt)
+        services = availability_zones.set_availability_zones(ctxt, services)
         if zone:
             services = [s for s in services if s['availability_zone'] == zone]
         hosts = []

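Note: with the availability_zone column removed from the services table, the nova-manage service and host listings above derive each service's zone on the fly via nova.availability_zones.set_availability_zones(). A minimal sketch of such a helper, assuming the zone is now recovered from host-aggregate metadata (the db call and CONF option names below are assumptions, not the verbatim nova code):

    # Sketch only: annotate each service record with a computed zone now
    # that the services table carries no availability_zone column. The
    # aggregate-metadata lookup and option names are assumptions.
    def set_availability_zones(context, services):
        services = [dict(service.iteritems()) for service in services]
        metadata = db.aggregate_host_get_by_metadata_key(
                context, key='availability_zone')
        for service in services:
            az = CONF.internal_service_availability_zone
            if service['topic'] == 'compute':
                if metadata.get(service['host']):
                    az = str(metadata[service['host']])
                else:
                    az = CONF.default_availability_zone
            service['availability_zone'] = az
        return services
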
@@ -628,8 +628,7 @@ class LibvirtConnTestCase(test.TestCase):
         service_ref = {'host': kwargs.get('host', 'dummy'),
                        'binary': 'nova-compute',
                        'topic': 'compute',
-                       'report_count': 0,
-                       'availability_zone': 'zone'}
+                       'report_count': 0}
 
         return db.service_create(context.get_admin_context(), service_ref)

@@ -331,3 +331,61 @@ class TestMigrations(test.TestCase):
 
         migration_api.downgrade(engine, TestMigrations.REPOSITORY, 145)
         _145_check()
+
+    def test_migration_147(self):
+        az = 'test_zone'
+        host1 = 'compute-host1'
+        host2 = 'compute-host2'
+
+        def _146_check():
+            service = services.select(services.c.id == 1).execute().first()
+            self.assertEqual(az, service.availability_zone)
+            self.assertEqual(host1, service.host)
+            service = services.select(services.c.id == 2).execute().first()
+            self.assertNotEqual(az, service.availability_zone)
+            service = services.select(services.c.id == 3).execute().first()
+            self.assertEqual(az, service.availability_zone)
+            self.assertEqual(host2, service.host)
+
+        for key, engine in self.engines.items():
+            migration_api.version_control(engine, TestMigrations.REPOSITORY,
+                                          migration.INIT_VERSION)
+            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146)
+            metadata = sqlalchemy.schema.MetaData()
+            metadata.bind = engine
+
+            #populate service table
+            services = sqlalchemy.Table('services', metadata,
+                    autoload=True)
+            services.insert().values(id=1, host=host1,
+                    binary='nova-compute', topic='compute', report_count=0,
+                    availability_zone=az).execute()
+            services.insert().values(id=2, host='sched-host',
+                    binary='nova-scheduler', topic='scheduler', report_count=0,
+                    availability_zone='ignore_me').execute()
+            services.insert().values(id=3, host=host2,
+                    binary='nova-compute', topic='compute', report_count=0,
+                    availability_zone=az).execute()
+
+            _146_check()
+
+            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 147)
+
+            # check aggregate metadata
+            aggregate_metadata = sqlalchemy.Table('aggregate_metadata',
+                    metadata, autoload=True)
+            aggregate_hosts = sqlalchemy.Table('aggregate_hosts',
+                    metadata, autoload=True)
+            metadata = aggregate_metadata.select(aggregate_metadata.c.
+                    aggregate_id == 1).execute().first()
+            self.assertEqual(az, metadata['value'])
+            self.assertEqual(aggregate_hosts.select(
+                    aggregate_hosts.c.aggregate_id == 1).execute().
+                    first().host, host1)
+            blank = [h for h in aggregate_hosts.select(
+                    aggregate_hosts.c.aggregate_id == 2).execute()]
+            self.assertEqual(blank, [])
+
+            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 146)
+
+            _146_check()

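The test above pins down the data move performed by migration 147: each nova-compute service's availability_zone becomes host-aggregate state (an 'availability_zone' metadata entry plus an aggregate_hosts row), non-compute services such as the nova-scheduler row are skipped, and downgrading to 146 restores the column contents. A rough sketch of the upgrade half, assuming the usual sqlalchemy-migrate entry point and one aggregate per zone (an illustration, not the shipped migration script):

    # Sketch of the 147 upgrade under stated assumptions; the aggregate
    # naming and dedup logic here are illustrative only.
    from sqlalchemy import MetaData, Table

    def upgrade(migrate_engine):
        meta = MetaData(bind=migrate_engine)
        services = Table('services', meta, autoload=True)
        aggregates = Table('aggregates', meta, autoload=True)
        aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
        aggregate_hosts = Table('aggregate_hosts', meta, autoload=True)

        zone_to_aggregate = {}
        for svc in services.select().execute():
            if svc.binary != 'nova-compute':
                continue  # e.g. the nova-scheduler row keeps no zone
            zone = svc.availability_zone
            if zone not in zone_to_aggregate:
                result = aggregates.insert().values(name=zone).execute()
                agg_id = result.inserted_primary_key[0]
                aggregate_metadata.insert().values(
                        aggregate_id=agg_id, key='availability_zone',
                        value=zone).execute()
                zone_to_aggregate[zone] = agg_id
            aggregate_hosts.insert().values(
                    aggregate_id=zone_to_aggregate[zone],
                    host=svc.host).execute()

        # Finally drop the now-redundant column (sqlalchemy-migrate API).
        services.c.availability_zone.drop()
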
@@ -61,7 +61,7 @@ CONF.import_opt('compute_manager', 'nova.config')
 CONF.import_opt('compute_driver', 'nova.virt.driver')
 CONF.import_opt('host', 'nova.config')
 CONF.import_opt('network_manager', 'nova.config')
-CONF.import_opt('node_availability_zone', 'nova.config')
+CONF.import_opt('default_availability_zone', 'nova.availability_zones')
 
 IMAGE_MACHINE = '1'
 IMAGE_KERNEL = '2'

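This hunk reflects the companion config change: node_availability_zone, previously registered in nova.config, is superseded by default_availability_zone, which lives in nova.availability_zones, so import_opt must point at the new module. The option definitions plausibly look like the following (defaults and the exact cfg import path are assumptions; Grizzly-era nova shipped its own copy of the openstack.common cfg module):

    # Sketch of the new options in nova/availability_zones.py; the
    # defaults shown here are assumptions.
    from oslo.config import cfg

    availability_zone_opts = [
        cfg.StrOpt('internal_service_availability_zone',
                   default='internal',
                   help='availability zone used for internal services'),
        cfg.StrOpt('default_availability_zone',
                   default='nova',
                   help='default availability zone for compute services'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(availability_zone_opts)
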
@@ -206,7 +206,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
         vol['user_id'] = 'fake'
         vol['project_id'] = 'fake'
         vol['host'] = 'localhost'
-        vol['availability_zone'] = CONF.node_availability_zone
+        vol['availability_zone'] = CONF.default_availability_zone
         vol['status'] = "creating"
         vol['attach_status'] = "detached"
         return db.volume_create(self.context, vol)

@@ -2196,8 +2196,7 @@ def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
                               {'host': host,
                                'binary': 'nova-compute',
                                'topic': 'compute',
-                               'report_count': 0,
-                               'availability_zone': avail_zone})
+                               'report_count': 0})
     return values
 
 
@@ -2213,7 +2212,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
                                     'Dom0IptablesFirewallDriver',
                     host='host',
                     compute_driver='xenapi.XenAPIDriver',
-                    node_availability_zone='avail_zone1')
+                    default_availability_zone='avail_zone1')
         self.flags(use_local=True, group='conductor')
         host_ref = xenapi_fake.get_all('host')[0]
         stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)