Remove availability_zones from service table

This is the final step in enabling availability_zones using aggregate
metadata. Previously every service had an availability_zone, but an
availability_zone is only meaningful for nova-compute: services such as
nova-scheduler, nova-network, and nova-conductor have always spanned
all availability_zones.

After this change only compute nodes (nova-compute) will have an
availability_zone. To preserve the current APIs, when running:
* nova host-list (os-hosts)
* euca-describe-availability-zones verbose
* nova-manage service list
internal services will appear in their own internal availability_zone
(CONF.internal_service_availability_zone). The internal zone is hidden
in euca-describe-availability-zones (non-verbose); the zone-assignment
rule is sketched below.
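
A minimal sketch of the resulting zone-assignment rule (hypothetical
helper for illustration only; the real logic lives in
nova/availability_zones.py in the diff below):

    INTERNAL_ZONE = 'internal'  # CONF.internal_service_availability_zone
    DEFAULT_ZONE = 'nova'       # CONF.default_availability_zone

    def zone_for_service(service, host_to_az):
        """Illustration: which zone a service row is reported in."""
        if service['topic'] != 'compute':
            # scheduler/network/conductor span every zone, so they are
            # grouped under the internal zone
            return INTERNAL_ZONE
        # nova-compute hosts take their zone from aggregate metadata,
        # falling back to the configured default
        return host_to_az.get(service['host'], DEFAULT_ZONE)

    # e.g. zone_for_service({'topic': 'scheduler', 'host': 's1'}, {})
    # returns 'internal'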

CONF.node_availability_zone has been renamed to
CONF.default_availability_zone and is now only used by nova-api and
nova-scheduler. CONF.node_availability_zone still works, but is
deprecated.
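
In nova.conf this might look like the following (illustrative values;
only the option names come from this change):

    [DEFAULT]
    # new name, read by nova-api and nova-scheduler
    default_availability_zone = nova
    # deprecated spelling, still honored for now:
    # node_availability_zone = nova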

DocImpact

Completes blueprint aggregate-based-availability-zones

Change-Id: Ib772df5f9ac2865f20df479f8ddce575a9ce3aff
Author: Joe Gordon
Date: 2012-12-20 03:13:01 +00:00
parent c1525e228c
commit 39fdd9344e
8 changed files with 144 additions and 20 deletions

View File

@@ -70,6 +70,7 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova.api.ec2 import ec2utils
from nova import availability_zones
from nova.compute import instance_types
from nova.compute import rpcapi as compute_rpcapi
from nova import config
@@ -626,6 +627,7 @@ class ServiceCommands(object):
        ctxt = context.get_admin_context()
        now = timeutils.utcnow()
        services = db.service_get_all(ctxt)
        services = availability_zones.set_availability_zones(ctxt, services)
        if host:
            services = [s for s in services if s['host'] == host]
        if service:
@@ -741,6 +743,7 @@ class HostCommands(object):
        ctxt = context.get_admin_context()
        now = timeutils.utcnow()
        services = db.service_get_all(ctxt)
        services = availability_zones.set_availability_zones(ctxt, services)
        if zone:
            services = [s for s in services if s['availability_zone'] == zone]
        hosts = []

View File

@@ -8,22 +8,22 @@
        {
            "host_name": "a98b433151084aee8b1a986e28823b36",
            "service": "cert",
            "zone": "nova"
            "zone": "internal"
        },
        {
            "host_name": "c56158d13a884a87abf9171efb7de9d8",
            "service": "network",
            "zone": "nova"
            "zone": "internal"
        },
        {
            "host_name": "81d5cdcda0014918b3ebd3503a2e5c9a",
            "service": "scheduler",
            "zone": "nova"
            "zone": "internal"
        },
        {
            "host_name": "6e48bfe1a3304b7b86154326328750ae",
            "service": "conductor",
            "zone": "nova"
            "zone": "internal"
        }
    ]
}

View File

@@ -0,0 +1,62 @@
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" utilities for multiple APIs"""
from nova import db
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
availability_zone_opts = [
cfg.StrOpt('internal_service_availability_zone',
default='internal',
help='availability_zone to show internal services under'),
cfg.StrOpt('default_availability_zone',
# deprecated in Grizzly release
deprecated_name='node_availability_zone',
default='nova',
help='default compute node availability_zone'),
]
CONF = cfg.CONF
CONF.register_opts(availability_zone_opts)
LOG = logging.getLogger(__name__)
def set_availability_zones(context, services):
# Makes sure services isn't a sqlalchemy object
services = [dict(service.iteritems()) for service in services]
metadata = db.aggregate_host_get_by_metadata_key(context,
key='availability_zone')
for service in services:
az = CONF.internal_service_availability_zone
if service['topic'] == "compute":
if metadata.get(service['host']):
az = str(metadata[service['host']])[5:-2]
else:
az = CONF.default_availability_zone
service['availability_zone'] = az
return services
def get_host_availability_zone(context, host):
metadata = db.aggregate_metadata_get_by_host(
context.get_admin_context(), host, key='availability_zone')
if 'availability_zone' in metadata:
return list(metadata['availability_zone'])[0]
else:
return CONF.default_availability_zone
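
With the service-table column gone, a compute host gets its zone from
host-aggregate metadata. A typical workflow with the novaclient CLI of
this era might look like this (the aggregate name, id, zone, and host
are illustrative):

    nova aggregate-create rack1-agg
    nova aggregate-set-metadata 1 availability_zone=rack1-az
    nova aggregate-add-host 1 compute-host1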

View File

@@ -14,15 +14,21 @@
# under the License.
from nova import availability_zones
from nova import db
from nova.openstack.common import cfg
from nova.scheduler import filters

CONF = cfg.CONF
CONF.import_opt('default_availability_zone', 'nova.availability_zones')


class AvailabilityZoneFilter(filters.BaseHostFilter):
    """Filters Hosts by availability zone.

    Works with both service and aggregate metadata.
    For aggregate metadata uses the key 'availability_zone'
    Works with aggregate metadata availability zones, using the key
    'availability_zone'
    Note: in theory a compute node can be part of multiple availability_zones
    """
@@ -32,12 +38,12 @@ class AvailabilityZoneFilter(filters.BaseHostFilter):
        availability_zone = props.get('availability_zone')

        if availability_zone:
            if availability_zone == host_state.service['availability_zone']:
                return True
            context = filter_properties['context'].elevated()
            metadata = db.aggregate_metadata_get_by_host(
                context, host_state.host, key='availability_zone')
            if 'availability_zone' in metadata:
                return availability_zone in metadata['availability_zone']
            else:
                return availability_zone == CONF.default_availability_zone
        return False
        return True
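
The filter comes into play when a boot request asks for a zone, for
example (illustrative image id, flavor, and names):

    nova boot --image <image_id> --flavor 1 \
        --availability-zone rack1-az test-server

A host passes if one of its aggregates carries
availability_zone=rack1-az; a host in no zoned aggregate passes only
when the requested zone equals CONF.default_availability_zone.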

View File

@@ -92,7 +92,6 @@ service_opts = [
CONF = cfg.CONF
CONF.register_opts(service_opts)
CONF.import_opt('host', 'nova.config')
CONF.import_opt('node_availability_zone', 'nova.config')
class SignalExit(SystemExit):
@@ -447,13 +446,11 @@ class Service(object):
        self.timers.append(periodic)

    def _create_service_ref(self, context):
        zone = CONF.node_availability_zone
        service_ref = db.service_create(context,
                                        {'host': self.host,
                                         'binary': self.binary,
                                         'topic': self.topic,
                                         'report_count': 0,
                                         'availability_zone': zone})
                                         'report_count': 0})
        self.service_id = service_ref['id']

    def __getattr__(self, key):

View File

@@ -628,8 +628,7 @@ class LibvirtConnTestCase(test.TestCase):
        service_ref = {'host': kwargs.get('host', 'dummy'),
                       'binary': 'nova-compute',
                       'topic': 'compute',
                       'report_count': 0,
                       'availability_zone': 'zone'}
                       'report_count': 0}

        return db.service_create(context.get_admin_context(), service_ref)

View File

@@ -331,3 +331,61 @@ class TestMigrations(test.TestCase):
        migration_api.downgrade(engine, TestMigrations.REPOSITORY, 145)
        _145_check()

    def test_migration_147(self):
        az = 'test_zone'
        host1 = 'compute-host1'
        host2 = 'compute-host2'

        def _146_check():
            service = services.select(services.c.id == 1).execute().first()
            self.assertEqual(az, service.availability_zone)
            self.assertEqual(host1, service.host)
            service = services.select(services.c.id == 2).execute().first()
            self.assertNotEqual(az, service.availability_zone)
            service = services.select(services.c.id == 3).execute().first()
            self.assertEqual(az, service.availability_zone)
            self.assertEqual(host2, service.host)

        for key, engine in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            # populate service table
            services = sqlalchemy.Table('services', metadata,
                                        autoload=True)
            services.insert().values(id=1, host=host1,
                    binary='nova-compute', topic='compute', report_count=0,
                    availability_zone=az).execute()
            services.insert().values(id=2, host='sched-host',
                    binary='nova-scheduler', topic='scheduler', report_count=0,
                    availability_zone='ignore_me').execute()
            services.insert().values(id=3, host=host2,
                    binary='nova-compute', topic='compute', report_count=0,
                    availability_zone=az).execute()

            _146_check()

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 147)

            # check aggregate metadata
            aggregate_metadata = sqlalchemy.Table('aggregate_metadata',
                                                  metadata, autoload=True)
            aggregate_hosts = sqlalchemy.Table('aggregate_hosts',
                                               metadata, autoload=True)
            metadata = aggregate_metadata.select(aggregate_metadata.c.
                    aggregate_id == 1).execute().first()
            self.assertEqual(az, metadata['value'])
            self.assertEqual(aggregate_hosts.select(
                    aggregate_hosts.c.aggregate_id == 1).execute().
                    first().host, host1)
            blank = [h for h in aggregate_hosts.select(
                    aggregate_hosts.c.aggregate_id == 2).execute()]
            self.assertEqual(blank, [])

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 146)

            _146_check()
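
Operators pick up migration 147 through the usual database sync
(illustrative session; the reported version depends on the deployment):

    nova-manage db version   # expect >= 147 after upgrading
    nova-manage db sync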

View File

@@ -61,7 +61,7 @@ CONF.import_opt('compute_manager', 'nova.config')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('host', 'nova.config')
CONF.import_opt('network_manager', 'nova.config')
CONF.import_opt('node_availability_zone', 'nova.config')
CONF.import_opt('default_availability_zone', 'nova.availability_zones')
IMAGE_MACHINE = '1'
IMAGE_KERNEL = '2'
@@ -206,7 +206,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
        vol['user_id'] = 'fake'
        vol['project_id'] = 'fake'
        vol['host'] = 'localhost'
        vol['availability_zone'] = CONF.node_availability_zone
        vol['availability_zone'] = CONF.default_availability_zone
        vol['status'] = "creating"
        vol['attach_status'] = "detached"
        return db.volume_create(self.context, vol)
@@ -2196,8 +2196,7 @@ def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
                              {'host': host,
                               'binary': 'nova-compute',
                               'topic': 'compute',
                               'report_count': 0,
                               'availability_zone': avail_zone})
                               'report_count': 0})
    return values
@@ -2213,7 +2212,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
                   'Dom0IptablesFirewallDriver',
                   host='host',
                   compute_driver='xenapi.XenAPIDriver',
                   node_availability_zone='avail_zone1')
                   default_availability_zone='avail_zone1')
        self.flags(use_local=True, group='conductor')
        host_ref = xenapi_fake.get_all('host')[0]
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)