modify cinder README.md
add vol/snapshot name length check
add vol/snapshot deletion check

Change-Id: I71e9c1f4af9231fd3a5378d988f3e7c9fa2327d4
@@ -98,7 +98,7 @@ We provide two ways to install the cinder proxy code. In this section, we will g

In case the automatic installation process is not complete, please check the following:

- Make sure your OpenStack version is Icehouse.
- Make sure your OpenStack version is Juno.
- Check the variables in the beginning of the install.sh scripts. Your installation directories may be different from the default values we provide.

@@ -26,8 +26,8 @@ intact.

:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
    :class:`manager.Manager` (default:
    :class:`cinder.volume.manager.Manager`).
    :class:`cinder.Manager` (default:
    :class:`cinder.volume.cinder_proxy.CinderProxy`).
:volume_group: Name of the group that will contain exported volumes (default:
    `cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
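
For reference, a minimal sketch (not part of this commit) of how options like the ones documented above are typically declared with oslo.config. The option names and defaults follow the README text; the surrounding module is illustrative only.

```python
# Sketch only: option names/defaults taken from the README above, the rest is
# illustrative. On Juno-era trees the import is `from oslo.config import cfg`.
from oslo_config import cfg

proxy_opts = [
    cfg.StrOpt('volume_manager',
               default='cinder.volume.cinder_proxy.CinderProxy',
               help='Manager class the cinder-volume service loads'),
    cfg.StrOpt('volume_topic', default='cinder-volume',
               help='RPC topic the volume service listens to'),
    cfg.StrOpt('volume_group', default='cinder-volumes',
               help='Name of the group that will contain exported volumes'),
    cfg.IntOpt('num_shell_tries', default=3,
               help='Number of times to attempt to run commands'),
]

CONF = cfg.CONF
CONF.register_opts(proxy_opts)
```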

@@ -182,6 +182,11 @@ class CinderProxy(manager.SchedulerDependentManager):

    RPC_API_VERSION = '1.18'
    target = messaging.Target(version=RPC_API_VERSION)

    VOLUME_NAME_LENGTH = 255
    VOLUME_UUID_LENGTH = 36
    SNAPSHOT_NAME_LENGTH = 255
    SNAPSHOT_UUID_LENGTH = 36

    def __init__(self, service_name=None, *args, **kwargs):
        """Load the specified in args, or flags."""
        # update_service_capabilities needs service_name to be volume

@@ -221,9 +226,8 @@ class CinderProxy(manager.SchedulerDependentManager):

        LOG.exception(ex)

    def _get_ccding_volume_id(self, volume):
        VOLUME_UUID_LENGTH = 36
        csd_name = volume._info["name"]
        uuid_len = VOLUME_UUID_LENGTH
        uuid_len = self.VOLUME_UUID_LENGTH
        if len(csd_name) > (uuid_len+1) and csd_name[-(uuid_len+1)] == '@':
            return csd_name[-uuid_len:]
        try:

@@ -231,10 +235,23 @@ class CinderProxy(manager.SchedulerDependentManager):

        except KeyError:
            return ''

    def _gen_ccding_volume_name(self, volume_name, volume_id):
        max_len = self.VOLUME_NAME_LENGTH - self.VOLUME_UUID_LENGTH - 1
        if (len(volume_name) <= max_len):
            return volume_name + "@" + volume_id
        else:
            return volume_name[0:max_len] + "@" + volume_id

    def _gen_ccding_snapshot_name(self, snapshot_name, snapshot_id):
        max_len = self.SNAPSHOT_NAME_LENGTH - self.SNAPSHOT_UUID_LENGTH - 1
        if (len(snapshot_name) <= max_len):
            return snapshot_name + "@" + snapshot_id
        else:
            return snapshot_name[0:max_len] + "@" + snapshot_id

    def _get_ccding_snapsot_id(self, snapshot):
        SNAPSHOT_UUID_LENGTH = 36
        csd_name = snapshot._info["name"]
        uuid_len = SNAPSHOT_UUID_LENGTH
        uuid_len = self.SNAPSHOT_UUID_LENGTH
        if len(csd_name) > (uuid_len+1) and csd_name[-(uuid_len+1)] == '@':
            return csd_name[-uuid_len:]
        try:
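
For illustration, a standalone sketch of the naming scheme these helpers implement: the cascaded resource is named `<display name>@<cascading uuid>`, truncated so the result stays within the 255-character limit, and the cascading uuid is recovered from the suffix after the trailing `@`. The name and id below are made up.

```python
# Standalone sketch of the cascading name scheme; the constants mirror the
# ones added in this commit, the sample name/id are invented.
VOLUME_NAME_LENGTH = 255
VOLUME_UUID_LENGTH = 36


def gen_ccding_name(name, resource_id,
                    name_len=VOLUME_NAME_LENGTH, uuid_len=VOLUME_UUID_LENGTH):
    # Keep room for '@' plus the 36-character uuid, truncating the name if needed.
    max_len = name_len - uuid_len - 1
    return name[:max_len] + "@" + resource_id


def get_ccding_id(csd_name, uuid_len=VOLUME_UUID_LENGTH):
    # The cascading uuid is the suffix after the trailing '@'.
    if len(csd_name) > (uuid_len + 1) and csd_name[-(uuid_len + 1)] == '@':
        return csd_name[-uuid_len:]
    return ''


volume_id = '11111111-2222-3333-4444-555555555555'
csd_name = gen_ccding_name('a' * 300, volume_id)
assert len(csd_name) <= VOLUME_NAME_LENGTH
assert get_ccding_id(csd_name) == volume_id
```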

@@ -272,9 +289,7 @@ class CinderProxy(manager.SchedulerDependentManager):

        try:
            ctx_dict = context.to_dict()
            cinderclient = cinder_client.Client(
                username=ctx_dict.get('user_id'),
                api_key=ctx_dict.get('auth_token'),
                project_id=ctx_dict.get('project_name'),
                auth_url=cfg.CONF.keystone_auth_url)
            cinderclient.client.auth_token = ctx_dict.get('auth_token')
            cinderclient.client.management_url = \

@@ -367,7 +382,8 @@ class CinderProxy(manager.SchedulerDependentManager):

        try:
            volume_properties = request_spec.get('volume_properties')
            size = volume_properties.get('size')
            display_name = volume_properties.get('display_name')+"@"+volume_id
            volume_name = volume_properties.get('display_name')
            display_name = self._gen_ccding_volume_name(volume_name, volume_id)
            display_description = volume_properties.get('display_description')
            volume_type_id = volume_properties.get('volume_type_id')
            user_id = ctx_dict.get('user_id')

@@ -375,8 +391,6 @@ class CinderProxy(manager.SchedulerDependentManager):

            cascaded_snapshot_id = None
            if snapshot_id is not None:
                # snapshot_ref = self.db.snapshot_get(context, snapshot_id)
                # cascaded_snapshot_id = snapshot_ref['mapping_uuid']
                cascaded_snapshot_id = \
                    self.volumes_mapping_cache['volumes'].get(snapshot_id, '')
                LOG.info(_('Cascade info: create volume from snapshot, '

@@ -384,8 +398,6 @@ class CinderProxy(manager.SchedulerDependentManager):

            cascaded_source_volid = None
            if source_volid is not None:
                # vol_ref = self.db.volume_get(context, source_volid)
                # cascaded_source_volid = vol_ref['mapping_uuid']
                cascaded_source_volid = \
                    self.volumes_mapping_cache['volumes'].get(volume_id, '')
                LOG.info(_('Cascade info: create volume from source volume, '

@@ -436,20 +448,10 @@ class CinderProxy(manager.SchedulerDependentManager):

                metadata=metadata,
                imageRef=cascaded_image_id)

            if 'logicalVolumeId' in metadata:
                metadata.pop('logicalVolumeId')
            # metadata['mapping_uuid'] = bodyResponse._info['id']
            self.db.volume_metadata_update(context, volume_id, metadata, True)

            if bodyResponse._info['status'] == 'creating':
                self.volumes_mapping_cache['volumes'][volume_id] = \
                    bodyResponse._info['id']

            # self.db.volume_update(
            #     context,
            #     volume_id,
            #     {'mapping_uuid': bodyResponse._info['id']})

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,

@@ -592,11 +594,8 @@ class CinderProxy(manager.SchedulerDependentManager):

            volumetypes = self.adminCinderClient.volume_types.list()
            qosSpecs = self.adminCinderClient.qos_specs.list()

            volname_type_list = []
            vol_types = self.db.volume_type_get_all(context, inactive=False)
            LOG.debug(_("cascade info, vol_types cascading :%s"), vol_types)
            for vol_type in vol_types:
                volname_type_list.append(vol_type)
            for volumetype in volumetypes:
                LOG.debug(_("cascade info, vol types cascaded :%s"),
                          volumetype)

@@ -673,8 +672,23 @@ class CinderProxy(manager.SchedulerDependentManager):

        try:
            self._delete_cascaded_volume(context, volume_id)
        except exception.VolumeIsBusy:
            LOG.error(_("Cannot delete volume %s: volume is busy"),
                      volume_ref['id'])
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'available'})
            return True
        except Exception:
            LOG.exception(_("Failed to delete volume"))
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_ref['id'],
                                      {'status': 'error_deleting'})

        # If deleting the source volume in a migration, we want to skip quotas
        # and other database updates.
        if volume_ref['migration_status']:
            return True

        # Get reservations
        try:
            reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}

@@ -735,6 +749,9 @@ class CinderProxy(manager.SchedulerDependentManager):

            return
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_id,
                                      {'status': 'error_deleting'})
                LOG.error(_('Cascade info: failed to delete cascaded'
                            ' volume %s'), cascaded_volume_id)
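
A minimal sketch (stand-in objects, not the actual manager) of the delete-volume error handling shown above: `VolumeIsBusy` puts the volume back to `available`, any other failure marks it `error_deleting` and re-raises so the caller still sees the original exception. `oslo_utils.excutils` is the current home of the helper; Juno-era trees carry it under `cinder.openstack.common.excutils`.

```python
# Sketch only: `db`, `ctx`, `volume_ref` and `delete_cascaded` stand in for
# the manager's real attributes and helper.
from oslo_utils import excutils


class VolumeIsBusy(Exception):
    """Stand-in for cinder.exception.VolumeIsBusy."""


def delete_volume(db, ctx, volume_ref, delete_cascaded):
    volume_id = volume_ref['id']
    try:
        delete_cascaded(ctx, volume_id)
    except VolumeIsBusy:
        # Busy volumes are put back to 'available' instead of erroring out.
        db.volume_update(ctx, volume_id, {'status': 'available'})
        return True
    except Exception:
        # Mark the volume, then re-raise the original exception unchanged.
        with excutils.save_and_reraise_exception():
            db.volume_update(ctx, volume_id, {'status': 'error_deleting'})
    return True
```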

@@ -743,7 +760,8 @@ class CinderProxy(manager.SchedulerDependentManager):

        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        display_name = snapshot_ref['display_name'] + "@" + snapshot_id
        snap_name = snapshot_ref['display_name']
        display_name = self._gen_ccding_snapshot_name(snap_name, snapshot_id)
        display_description = snapshot_ref['display_description']
        LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])

@@ -877,11 +895,11 @@ class CinderProxy(manager.SchedulerDependentManager):

                     cascaded_snapshot_id)

            cinderClient = self._get_cinder_cascaded_user_client(context)
            cinderClient.volume_snapshots.get(snapshot_id)
            cinderClient.volume_snapshots.delete(cascaded_snapshot_id)
            cinderClient.volume_snapshots.get(cascaded_snapshot_id)
            resp = cinderClient.volume_snapshots.delete(cascaded_snapshot_id)
            self.volumes_mapping_cache['snapshots'].pop(snapshot_id, '')
            LOG.info(_("delete cascade snapshot %s successfully."),
                     cascaded_snapshot_id)
            LOG.info(_("delete cascade snapshot %s successfully. resp :%s"),
                     cascaded_snapshot_id, resp)
            return
        except cinder_exception.NotFound:
            self.volumes_mapping_cache['snapshots'].pop(snapshot_id, '')

@@ -890,6 +908,9 @@ class CinderProxy(manager.SchedulerDependentManager):

            return
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_id,
                                        {'status': 'error_deleting'})
                LOG.error(_("failed to delete cascade snapshot %s"),
                          cascaded_snapshot_id)
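
A minimal sketch (stand-in client and cache, not the actual proxy) of the snapshot deletion check this commit adds: look the cascaded snapshot up first, treat NotFound as "already deleted" and just drop the mapping, otherwise delete it and keep the delete response for logging.

```python
# Sketch only: `client` mimics a cinderclient-like object, `mapping_cache`
# mimics volumes_mapping_cache['snapshots'].
class NotFound(Exception):
    """Stand-in for cinderclient's NotFound exception."""


def delete_cascaded_snapshot(client, mapping_cache, snapshot_id,
                             cascaded_snapshot_id):
    try:
        # Existence check first: NotFound means the cascaded snapshot is
        # already gone, so only the mapping cache needs cleaning up.
        client.volume_snapshots.get(cascaded_snapshot_id)
    except NotFound:
        mapping_cache.pop(snapshot_id, '')
        return None
    resp = client.volume_snapshots.delete(cascaded_snapshot_id)
    mapping_cache.pop(snapshot_id, '')
    return resp
```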

@@ -1115,15 +1136,15 @@ class CinderProxy(manager.SchedulerDependentManager):

        volume_stats = {
            'pools': [{
                'pool_name': 'LVM_iSCSI',
                'QoS_support': False,
                'pool_name': 'Huawei_Cascade',
                'QoS_support': True,
                'free_capacity_gb': 10240.0,
                'location_info': fake_location_info,
                'total_capacity_gb': 10240.0,
                'reserved_percentage': 0
            }],
            'driver_version': '2.0.0',
            'vendor_name': 'OpenSource',
            'vendor_name': 'Huawei',
            'volume_backend_name': 'LVM_iSCSI',
            'storage_protocol': 'iSCSI'}

@@ -1,129 +0,0 @@

#!/bin/bash

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.

_CINDER_CONF_DIR="/etc/cinder"
_CINDER_CONF_FILE="cinder.conf"
_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder"
_CINDER_CONF_OPTION=("volume_manager volume_sync_interval periodic_interval cinder_tenant_name cinder_username cinder_password keystone_auth_url glance_cascading_flag cascading_glance_url cascaded_glance_url cascaded_cinder_url cascaded_region_name cascaded_available_zone")

# if you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../cinder"
_BACKUP_DIR="${_CINDER_DIR}/cinder-proxy-installation-backup"
_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log"

#_SCRIPT_NAME="${0##*/}"
#_SCRIPT_LOGFILE="/var/log/nova-solver-scheduler/installation/${_SCRIPT_NAME}.log"

function log()
{
    if [ ! -f "${_CINDER_INSTALL_LOG}" ] ; then
        mkdir -p `dirname ${_CINDER_INSTALL_LOG}`
        touch $_CINDER_INSTALL_LOG
        chmod 777 $_CINDER_INSTALL_LOG
    fi
    echo "$@"
    echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG
}

if [[ ${EUID} -ne 0 ]]; then
    log "Please run as root."
    exit 1
fi

cd `dirname $0`

log "checking installation directories..."
if [ ! -d "${_CINDER_DIR}" ] ; then
    log "Could not find the cinder installation. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi
if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then
    log "Could not find cinder config file. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi

log "checking backup..."
if [ ! -d "${_BACKUP_DIR}/cinder" ] ; then
    log "Could not find backup files. It is possible that the cinder-proxy has been uninstalled."
    log "If this is not the case, then please uninstall manually."
    exit 1
fi

log "backing up current files that might be overwritten..."
if [ -d "${_BACKUP_DIR}/uninstall" ] ; then
    rm -r "${_BACKUP_DIR}/uninstall"
fi
mkdir -p "${_BACKUP_DIR}/uninstall/cinder"
mkdir -p "${_BACKUP_DIR}/uninstall/etc/cinder"
cp -r "${_CINDER_DIR}/volume" "${_BACKUP_DIR}/uninstall/cinder/"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/uninstall/cinder"
    log "Error in code backup, aborted."
    exit 1
fi
cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/uninstall/etc/cinder/"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/uninstall/cinder"
    rm -r "${_BACKUP_DIR}/uninstall/etc"
    log "Error in config backup, aborted."
    exit 1
fi

log "restoring code to the status before installing cinder-proxy..."
cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}`
if [ $? -ne 0 ] ; then
    log "Error in copying, aborted."
    log "Recovering current files..."
    cp -r "${_BACKUP_DIR}/uninstall/cinder" `dirname ${_CINDER_DIR}`
    if [ $? -ne 0 ] ; then
        log "Recovering failed! Please uninstall manually."
    fi
    exit 1
fi

log "updating config file..."
for option in $_CINDER_CONF_OPTION
do
    sed -i.uninstall.backup -e "/"$option "*=/d" "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}"
done
if [ $? -ne 0 ] ; then
    log "Error in updating, aborted."
    log "Recovering current files..."
    cp "${_BACKUP_DIR}/uninstall/etc/cinder/${_CINDER_CONF_FILE}" "${_CINDER_CONF_DIR}"
    if [ $? -ne 0 ] ; then
        log "Recovering failed! Please uninstall manually."
    fi
    exit 1
fi

log "cleaning up backup files..."
rm -r "${_BACKUP_DIR}/cinder" && rm -r "${_BACKUP_DIR}/etc"
if [ $? -ne 0 ] ; then
    log "There was an error when cleaning up the backup files."
fi

log "restarting cinder volume..."
service openstack-cinder-volume restart
if [ $? -ne 0 ] ; then
    log "There was an error in restarting the service, please restart cinder volume manually."
    exit 1
fi

log "Completed."

exit 0
@@ -2,7 +2,7 @@ Cinder timestamp-query-patch
===============================
It will be patched in the cascaded level's control node.

The cinder icehouse version database has an updated_at attribute for the change_since
The cinder juno version database has an updated_at attribute for the change_since
query filter; however, the cinder db api in this version does not support
timestamp queries. So this patch is needed at the cascaded level when
synchronizing state between the cascading and cascaded OpenStack levels.
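
A minimal sketch (made-up model, not the patch's actual db api code) of the kind of timestamp filter the patch adds at the cascaded level: return only volumes whose updated_at is newer than a given time, so the cascading proxy can sync state incrementally with a change_since style query.

```python
# Sketch only: a tiny SQLAlchemy model standing in for cinder's volumes table.
import datetime

from sqlalchemy import Column, DateTime, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Volume(Base):
    __tablename__ = 'volumes'
    id = Column(String(36), primary_key=True)
    status = Column(String(16))
    updated_at = Column(DateTime)


def volumes_updated_since(session, since):
    """Equivalent of a change_since filter on the updated_at attribute."""
    return session.query(Volume).filter(Volume.updated_at > since).all()


engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
with Session(engine) as session:
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(minutes=5)
    print(volumes_updated_since(session, cutoff))
```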

@@ -17,7 +17,7 @@ Key modules

Requirements
------------
* OpenStack Icehouse has been installed
* OpenStack Juno has been installed

Installation
------------

@@ -26,7 +26,7 @@ We provide two ways to install the timestamp query patch code. In this section,

* **Note:**

  - Make sure you have an existing installation of **OpenStack Icehouse**.
  - Make sure you have an existing installation of **OpenStack Juno**.
  - We recommend that you back up at least the following files before installation, because they are to be overwritten or modified:

* **Manual Installation**
@@ -1,115 +0,0 @@

#!/bin/bash

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.

_CINDER_CONF_DIR="/etc/cinder"
_CINDER_CONF_FILE="cinder.conf"
_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder"
_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log"

# if you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../cinder"
_BACKUP_DIR="${_CINDER_DIR}/cinder_timestamp_query_patch-installation-backup"

#_SCRIPT_NAME="${0##*/}"
#_SCRIPT_LOGFILE="/var/log/nova-solver-scheduler/installation/${_SCRIPT_NAME}.log"

function log()
{
    if [ ! -f "${_CINDER_INSTALL_LOG}" ] ; then
        mkdir -p `dirname ${_CINDER_INSTALL_LOG}`
        touch $_CINDER_INSTALL_LOG
        chmod 777 $_CINDER_INSTALL_LOG
    fi
    echo "$@"
    echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG
}

if [[ ${EUID} -ne 0 ]]; then
    log "Please run as root."
    exit 1
fi

cd `dirname $0`

log "checking installation directories..."
if [ ! -d "${_CINDER_DIR}" ] ; then
    log "Could not find the cinder installation. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi
if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then
    log "Could not find cinder config file. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi

log "checking backup..."
if [ ! -d "${_BACKUP_DIR}/cinder" ] ; then
    log "Could not find backup files. It is possible that the cinder-proxy has been uninstalled."
    log "If this is not the case, then please uninstall manually."
    exit 1
fi

log "backing up current files that might be overwritten..."
if [ -d "${_BACKUP_DIR}/uninstall" ] ; then
    rm -r "${_BACKUP_DIR}/uninstall"
fi
mkdir -p "${_BACKUP_DIR}/uninstall/cinder"
mkdir -p "${_BACKUP_DIR}/uninstall/etc/cinder"
cp -r "${_CINDER_DIR}/volume" "${_BACKUP_DIR}/uninstall/cinder/"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/uninstall/cinder"
    log "Error in code backup, aborted."
    exit 1
fi
cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/uninstall/etc/cinder/"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/uninstall/cinder"
    rm -r "${_BACKUP_DIR}/uninstall/etc"
    log "Error in config backup, aborted."
    exit 1
fi

log "restoring code to the status before installing cinder-proxy..."
cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}`
if [ $? -ne 0 ] ; then
    log "Error in copying, aborted."
    log "Recovering current files..."
    cp -r "${_BACKUP_DIR}/uninstall/cinder" `dirname ${_CINDER_DIR}`
    if [ $? -ne 0 ] ; then
        log "Recovering failed! Please uninstall manually."
    fi
    exit 1
fi

log "cleaning up backup files..."
rm -r "${_BACKUP_DIR}/cinder" && rm -r "${_BACKUP_DIR}/etc"
if [ $? -ne 0 ] ; then
    log "There was an error when cleaning up the backup files."
fi

log "restarting cinder api..."
service openstack-cinder-api restart
if [ $? -ne 0 ] ; then
    log "There was an error in restarting the service, please restart cinder api manually."
    exit 1
fi

log "Completed."

exit 0