deb-cinder/cinder/manager.py
Gorka Eguileor a5b4e34f94 Add cluster job distribution
This patch adds a new mechanism to distribute jobs to c-vol nodes with
the addition of the "cluster" configuration option.

DocImpact: New cluster configuration option
Specs: https://review.openstack.org/327283
Implements: blueprint cinder-volume-active-active-support
Change-Id: Id0a2e4c6a7b5f17e4ee3484ae1b53c03d472b586
2016-07-22 21:00:11 +02:00
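
A hypothetical cinder.conf example (illustrative value only):

    [DEFAULT]
    cluster = mycluster

c-vol services configured with the same cluster value form a cluster and
have jobs distributed among them.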


# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base Manager class.
Managers are responsible for a certain aspect of the system. It is a logical
grouping of code relating to a portion of the system. In general other
components should be using the manager to make changes to the components that
it is responsible for.
For example, other components that need to deal with volumes in some way,
should do so by calling methods on the VolumeManager instead of directly
changing fields in the database. This allows us to keep all of the code
relating to volumes in the same place.
We have adopted a basic strategy of Smart managers and dumb data, which means
rather than attaching methods to data objects, components should call manager
methods that act on the data.
Methods on managers that can be executed locally should be called directly. If
a particular method must execute on a remote host, this should be done via rpc
to the service that wraps the manager
Managers should be responsible for most of the db access, and
non-implementation specific data. Anything implementation specific that can't
be generalized should be done by the Driver.
In general, we prefer to have one manager with multiple drivers for different
implementations, but sometimes it makes sense to have multiple managers. You
can think of it this way: Abstract different overall strategies at the manager
level(FlatNetwork vs VlanNetwork), and different implementations at the driver
level(LinuxNetDriver vs CiscoNetDriver).
Managers will often provide methods for initial setup of a host or periodic
tasks to a wrapping service.
This module provides Manager, a base class for managers.
"""
from eventlet import greenpool
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import periodic_task

from cinder.db import base
from cinder.i18n import _LI
from cinder import rpc
from cinder.scheduler import rpcapi as scheduler_rpcapi


CONF = cfg.CONF

LOG = logging.getLogger(__name__)


class PeriodicTasks(periodic_task.PeriodicTasks):
    def __init__(self):
        super(PeriodicTasks, self).__init__(CONF)


class Manager(base.Base, PeriodicTasks):
    # Set RPC API version to 1.0 by default.
    RPC_API_VERSION = '1.0'

    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, host=None, db_driver=None, cluster=None):
        if not host:
            host = CONF.host
        self.host = host
        self.cluster = cluster
        self.additional_endpoints = []
        super(Manager, self).__init__(db_driver)

    def periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        return self.run_periodic_tasks(context, raise_on_error=raise_on_error)

    def init_host(self, added_to_cluster=None):
        """Handle initialization if this is a standalone service.

        A hook point for services to execute tasks before the services are
        made available (i.e. showing up on RPC and starting to accept RPC
        calls) to other components. Child classes should override this
        method.

        :param added_to_cluster: True when a host's cluster configuration has
                                 changed from not being defined or being '' to
                                 any other value and the DB service record
                                 reflects this new value.
        """
        pass
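
    # A hedged illustration (hypothetical subclass code, for example only):
    # a child class could use ``added_to_cluster`` to adopt its existing
    # resources into the newly configured cluster, e.g.:
    #
    #     def init_host(self, added_to_cluster=None):
    #         if added_to_cluster:
    #             # Hypothetical helper; not defined in this module.
    #             self._move_resources_to_cluster(self.cluster)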

    def init_host_with_rpc(self):
        """A hook for service to do jobs after RPC is ready.

        Like init_host(), this method is a hook where services get a chance
        to execute tasks that *need* RPC. Child classes should override
        this method.
        """
        pass

    def is_working(self):
        """Method indicating if service is working correctly.

        This method is supposed to be overridden by subclasses and return
        whether the manager is working correctly.
        """
        return True

    def reset(self):
        """Method executed when SIGHUP is caught by the process.

        We're utilizing it to reset RPC API version pins to avoid restart of
        the service when rolling upgrade is completed.
        """
        LOG.info(_LI('Resetting cached RPC version pins.'))
        rpc.LAST_OBJ_VERSIONS = {}
        rpc.LAST_RPC_VERSIONS = {}
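
    # For example, once a rolling upgrade has completed, an operator can make
    # a running service drop its version pins without restarting it by
    # sending SIGHUP to the process, e.g.:
    #
    #     kill -HUP <service pid>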


class SchedulerDependentManager(Manager):
    """Periodically send capability updates to the Scheduler services.

    Services that need to update the Scheduler with their capabilities
    should derive from this class. Otherwise they can derive from
    manager.Manager directly. Updates are only sent after
    update_service_capabilities is called with non-None values.
    """

    def __init__(self, host=None, db_driver=None, service_name='undefined',
                 cluster=None):
        self.last_capabilities = None
        self.service_name = service_name
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self._tp = greenpool.GreenPool()
        super(SchedulerDependentManager, self).__init__(host, db_driver,
                                                        cluster=cluster)

    def update_service_capabilities(self, capabilities):
        """Remember these capabilities to send on next periodic update."""
        self.last_capabilities = capabilities
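
    # For example (hypothetical values, for illustration only), a volume
    # manager collecting driver stats might report:
    #
    #     self.update_service_capabilities(
    #         {'volume_backend_name': 'lvm',
    #          'free_capacity_gb': 100,
    #          'reserved_percentage': 0})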

    @periodic_task.periodic_task
    def _publish_service_capabilities(self, context):
        """Pass data back to the scheduler at a periodic interval."""
        if self.last_capabilities:
            LOG.debug('Notifying Schedulers of capabilities ...')
            self.scheduler_rpcapi.update_service_capabilities(
                context,
                self.service_name,
                self.host,
                self.last_capabilities)

    def _add_to_threadpool(self, func, *args, **kwargs):
        """Run func asynchronously in the manager's green thread pool."""
        self._tp.spawn_n(func, *args, **kwargs)

    def reset(self):
        super(SchedulerDependentManager, self).reset()
        # Recreate the client so it renegotiates RPC versions now that the
        # cached version pins have been cleared.
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
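
# A minimal usage sketch (hypothetical subclass, not part of this module):
# derive from SchedulerDependentManager and report capabilities; the
# _publish_service_capabilities periodic task then forwards them to the
# schedulers.
#
#     class ExampleManager(SchedulerDependentManager):
#         def __init__(self, host=None, db_driver=None, cluster=None):
#             super(ExampleManager, self).__init__(
#                 host, db_driver, service_name='example', cluster=cluster)
#
#         def _report_status(self, context):
#             self.update_service_capabilities({'free_capacity_gb': 10})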