106c14a84b
This focuses the replication work on a specific use case, and eliminates some of the ambiguity in earlier versions. Additionally this implementation addresses needs for devices that do replication based on the whole backend-device or on Pools. Use case: DR scenario, where a storage device is rendered inoperable. This implementation allows the preservation of user data for those volumes that are of type replication-enabled. The goal is NOT to make failures completely transparent but instead to preserve data access while an Admin tries to rebuild/recover his/her cloud. It's very important to note that we're no longer interested in dealing with replication in Cinder at a Volume level. The concept of have "some" volumes failover, and "others" left behind, proved to not only be overly complex and difficult to implement, but we never identified a concrete use-case where one would use failover in a scenario where some volumes would stay and be accessible on a primary but other may be moved and accessed via a secondary. In this model, it's host/backend based. So when you failover, you're failing over an entire backend. We heavily leverage existing resources, specifically services, and capabilities. Implements: blueprint replication-update Change-Id: If862bcd18515098639f94a8294a8e44e1358c52a
87 lines
2.4 KiB
Python
87 lines
2.4 KiB
Python
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
|
"""Custom fields for Cinder objects."""
|
|
|
|
from oslo_versionedobjects import fields
|
|
|
|
|
|
BaseEnumField = fields.BaseEnumField
|
|
Enum = fields.Enum
|
|
Field = fields.Field
|
|
FieldType = fields.FieldType
|
|
|
|
|
|
class BackupStatus(Enum):
|
|
ERROR = 'error'
|
|
ERROR_DELETING = 'error_deleting'
|
|
CREATING = 'creating'
|
|
AVAILABLE = 'available'
|
|
DELETING = 'deleting'
|
|
DELETED = 'deleted'
|
|
RESTORING = 'restoring'
|
|
|
|
ALL = (ERROR, ERROR_DELETING, CREATING, AVAILABLE, DELETING, DELETED,
|
|
RESTORING)
|
|
|
|
def __init__(self):
|
|
super(BackupStatus, self).__init__(valid_values=BackupStatus.ALL)
|
|
|
|
|
|
class BackupStatusField(BaseEnumField):
|
|
AUTO_TYPE = BackupStatus()
|
|
|
|
|
|
class ConsistencyGroupStatus(Enum):
|
|
ERROR = 'error'
|
|
AVAILABLE = 'available'
|
|
CREATING = 'creating'
|
|
DELETING = 'deleting'
|
|
DELETED = 'deleted'
|
|
UPDATING = 'updating'
|
|
IN_USE = 'in-use'
|
|
ERROR_DELETING = 'error_deleting'
|
|
|
|
ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED,
|
|
UPDATING, IN_USE, ERROR_DELETING)
|
|
|
|
def __init__(self):
|
|
super(ConsistencyGroupStatus, self).__init__(
|
|
valid_values=ConsistencyGroupStatus.ALL)
|
|
|
|
|
|
class ConsistencyGroupStatusField(BaseEnumField):
|
|
AUTO_TYPE = ConsistencyGroupStatus()
|
|
|
|
|
|
class ReplicationStatus(Enum):
|
|
ERROR = 'error'
|
|
ENABLED = 'enabled'
|
|
DISABLED = 'disabled'
|
|
NOT_CAPABLE = 'not-capable'
|
|
FAILING_OVER = 'failing-over'
|
|
FAILOVER_ERROR = 'failover-error'
|
|
FAILED_OVER = 'failed-over'
|
|
|
|
ALL = (ERROR, ENABLED, DISABLED, NOT_CAPABLE, FAILOVER_ERROR, FAILING_OVER,
|
|
FAILED_OVER)
|
|
|
|
def __init__(self):
|
|
super(ReplicationStatus, self).__init__(
|
|
valid_values=ReplicationStatus.ALL)
|
|
|
|
|
|
class ReplicationStatusField(BaseEnumField):
|
|
AUTO_TYPE = ReplicationStatus()
|