Remove redundant 'availability_zone' config options.

There are two availability_zone related config options now:
  node_availability_zone and storage_availability_zone
While volume API uses node_availability_zone, the scheduler uses
the other, which causes failure in scheduler when these two options
have different values.

This patch removes 'node_availability_zone' so that end users
can specify an availability zone when creating volumes using the
Simple scheduler.

Partially implement blueprint implement-availability-zones

Change-Id: Ia2f0710908495ec64dacd2ee9bd10d7f8cd415df
This commit is contained in:
Zhiteng Huang 2012-08-09 00:41:28 +08:00
parent e8db777de6
commit 30a5916166
6 changed files with 5 additions and 12 deletions

View File

@@ -218,7 +218,7 @@ global_opts = [
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address.'),
cfg.StrOpt('node_availability_zone',
cfg.StrOpt('storage_availability_zone',
default='cinder',
help='availability zone of this node'),
cfg.StrOpt('notification_driver',

View File

@@ -43,7 +43,7 @@ FLAGS.register_opts(simple_scheduler_opts)
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
def schedule_create_volume(self, context, volume_id, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
elevated = context.elevated()

View File

@@ -202,7 +202,7 @@ class Service(object):
self.timers.append(periodic)
def _create_service_ref(self, context):
zone = FLAGS.node_availability_zone
zone = FLAGS.storage_availability_zone
service_ref = db.service_create(context,
{'host': self.host,
'binary': self.binary,
@@ -289,7 +289,7 @@ class Service(object):
def report_state(self):
"""Update the state of this service in the datastore."""
ctxt = context.get_admin_context()
zone = FLAGS.node_availability_zone
zone = FLAGS.storage_availability_zone
state_catalog = {}
try:
try:

View File

@@ -41,7 +41,6 @@ volume_host_opt = cfg.BoolOpt('snapshot_same_host',
FLAGS = flags.FLAGS
FLAGS.register_opt(volume_host_opt)
flags.DECLARE('storage_availability_zone', 'cinder.volume.manager')
LOG = logging.getLogger(__name__)

View File

@@ -29,7 +29,6 @@ intact.
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:storage_availability_zone: Defaults to `cinder`.
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.driver.ISCSIDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
@@ -55,9 +54,6 @@ from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
volume_manager_opts = [
cfg.StrOpt('storage_availability_zone',
default='cinder',
help='availability zone of this service'),
cfg.StrOpt('volume_driver',
default='cinder.volume.driver.ISCSIDriver',
help='Driver to use for volume creation'),

View File

@@ -122,7 +122,7 @@
###### (StrOpt) ip address of this host
# my_ip="10.0.0.1"
###### (StrOpt) availability zone of this node
# node_availability_zone="cinder"
# storage_availability_zone="cinder"
###### (StrOpt) Default driver for sending notifications
# notification_driver="cinder.notifier.no_op_notifier"
###### (StrOpt) kernel image that indicates not to use a kernel, but to use a raw disk image instead
@@ -353,8 +353,6 @@
# rpc_thread_pool_size=1024
###### (StrOpt) File name of clean sqlite db
# sqlite_clean_db="clean.sqlite"
###### (StrOpt) availability zone of this service
# storage_availability_zone="cinder"
###### (BoolOpt) if True, will not discover local volumes
# use_local_volumes=true
###### (StrOpt) Driver to use for volume creation