Transplant tricircle to the Juno version

The tricircle project was based on the OpenStack Icehouse release; this
patch adapts it to the Juno release.

Change-Id: I1afe6122575ed2bedb6e3220177e161763ce161c
joey5678 2014-11-22 17:26:25 +08:00
parent e211f7efef
commit e6d7dc78ce
708 changed files with 10646 additions and 160919 deletions
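For orientation, the cinder-proxy hunks below bump the proxy's RPC API version from 1.16 to 1.18 and pin it on an explicit oslo.messaging Target, the Juno-era manager pattern. The snippet below is a minimal illustrative sketch of that pattern, assuming the Juno-style `from oslo import messaging` namespace import; it is not code from this patch.
```
# Minimal sketch of the Juno-style RPC target declaration adopted below;
# assumes oslo.messaging (Juno-era namespace package) is installed.
from oslo import messaging


class ProxySketch(object):
    RPC_API_VERSION = '1.18'   # bumped from 1.16 by this patch
    target = messaging.Target(version=RPC_API_VERSION)


print(ProxySketch.target.version)   # -> 1.18
```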

View File

@@ -49,6 +49,7 @@ from cinder import quota
from cinder import utils
from cinder import volume
from cinder.i18n import _
from cinder.image import glance
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
@@ -65,6 +66,7 @@ from eventlet.greenpool import GreenPool
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
volume_manager_opts = [
cfg.IntOpt('migration_create_volume_timeout_secs',
@@ -75,6 +77,10 @@ volume_manager_opts = [
default=5,
help='seconds between cascading and cascaded cinders '
'when synchronizing volume data'),
cfg.IntOpt('voltype_sync_interval',
default=3600,
help='seconds between cascading and cascaded cinders '
'when synchronizing volume type and qos data'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
@@ -165,7 +171,7 @@ class CinderProxy(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.16'
RPC_API_VERSION = '1.18'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, service_name=None, *args, **kwargs):
@@ -408,7 +414,8 @@ class CinderProxy(manager.SchedulerDependentManager):
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
snapshot_id=None, image_id=None, source_volid=None):
snapshot_id=None, image_id=None, source_volid=None,
source_replicaid=None, consistencygroup_id=None):
"""Creates and exports the volume."""
ctx_dict = context.__dict__
@@ -529,7 +536,6 @@ class CinderProxy(manager.SchedulerDependentManager):
if self._change_since_time is None:
search_opt = {'all_tenants': True}
volumes = cinderClient.volumes.list(search_opts=search_opt)
volumetypes = cinderClient.volume_types.list()
LOG.info(_('Cascade info: change since time is none, '
'volumes: %s'), volumes)
else:
@@ -544,7 +550,6 @@ class CinderProxy(manager.SchedulerDependentManager):
search_op = {'all_tenants': True,
'changes-since': new_change_since_isotime}
volumes = cinderClient.volumes.list(search_opts=search_op)
volumetypes = cinderClient.volume_types.list()
LOG.info(_('Cascade info: search time is not none, '
'volumes: %s'), volumes)
@@ -563,13 +568,18 @@ class CinderProxy(manager.SchedulerDependentManager):
'attach_time': timeutils.strtime()
})
elif volume_status == "available":
if volume._info['bootable'].lower() == 'false':
bv = '0'
else:
bv = '1'
self.db.volume_update(context, volume_id,
{'status': volume._info['status'],
'attach_status': 'detached',
'instance_uuid': None,
'attached_host': None,
'mountpoint': None,
'attach_time': None
'attach_time': None,
'bootable': bv
})
else:
self.db.volume_update(context, volume_id,
@@ -577,19 +587,79 @@ class CinderProxy(manager.SchedulerDependentManager):
LOG.info(_('Cascade info: Updated the volume %s status from '
'cinder-proxy'), volume_id)
vol_types = self.db.volume_type_get_all(context, inactive=False)
for volumetype in volumetypes:
volume_type_name = volumetype._info['name']
if volume_type_name not in vol_types.keys():
extra_specs = volumetype._info['extra_specs']
self.db.volume_type_create(
context,
dict(name=volume_type_name, extra_specs=extra_specs))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to sync volume status to db.'))
@periodic_task.periodic_task(spacing=CONF.voltype_sync_interval,
run_immediately=True)
def _heal_volumetypes_and_qos(self, context):
try:
cinderClient = self._get_cinder_cascaded_admin_client()
volumetypes = cinderClient.volume_types.list()
qosSpecs = cinderClient.qos_specs.list()
volname_type_list = []
vol_types = self.db.volume_type_get_all(context, inactive=False)
LOG.debug(_("cascade info, vol_types cascading :%s"), vol_types)
for vol_type in vol_types:
volname_type_list.append(vol_type)
for volumetype in volumetypes:
LOG.debug(_("cascade info, vol types cascaded :%s"),
volumetype)
volume_type_name = volumetype._info['name']
if volume_type_name not in vol_types.keys():
extraspec = volumetype._info['extra_specs']
self.db.volume_type_create(
context,
dict(name=volume_type_name, extra_specs=extraspec))
qos_specs = self.db.qos_specs_get_all(context, inactive=False)
qosname_list_cascading = []
for qos_cascading in qos_specs:
qosname_list_cascading.append(qos_cascading['name'])
for qos_cascaded in qosSpecs:
qos_name_cascaded = qos_cascaded._info['name']
if qos_name_cascaded not in qosname_list_cascading:
qos_create_val = {}
qos_create_val['name'] = qos_name_cascaded
qos_spec_value = qos_cascaded._info['specs']
qos_spec_value['consumer'] = \
qos_cascaded._info['consumer']
qos_create_val['qos_specs'] = qos_spec_value
LOG.info(_('Cascade info, create qos_spec %s in db'),
qos_name_cascaded)
self.db.qos_specs_create(context, qos_create_val)
LOG.info(_('Cascade info, qos_spec finished %s in db'),
qos_create_val)
qos_specs_id = qos_cascading['id']
assoc_ccd =\
self.db.volume_type_qos_associations_get(context,
qos_specs_id)
qos_id = qos_cascaded._info['id']
association =\
cinderClient.qos_specs.get_associations(qos_id)
for assoc in association:
assoc_name = assoc._info['name']
LOG.debug(_("Cascade info, assoc name %s"), assoc_name)
if assoc_ccd is None or assoc_name not in assoc_ccd:
voltype = \
self.db.volume_type_get_by_name(context,
assoc_name)
LOG.debug(_("Cascade info, voltypes %s"), voltype)
self.db.qos_specs_associate(context,
qos_cascading['id'],
voltype['id'],)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to sync volume type to db.'))
@locked_volume_operation
def delete_volume(self, context, volume_id, unmanage_only=False):
"""Deletes and unexports volume."""
@@ -606,9 +676,6 @@ class CinderProxy(manager.SchedulerDependentManager):
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if volume_ref['host'] != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
self._notify_about_volume_usage(context, volume_ref, "delete.start")
self._reset_stats()
@@ -919,6 +986,15 @@ class CinderProxy(manager.SchedulerDependentManager):
LOG.debug(_('Cascade info: upload volume to image, finish update '
'image %s locations %s.'), image_id, locations)
volume = self.db.volume_get(context, volume_id)
if (volume['instance_uuid'] is None and
volume['attached_host'] is None):
self.db.volume_update(context, volume_id,
{'status': 'available'})
else:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
@@ -1021,16 +1097,21 @@ class CinderProxy(manager.SchedulerDependentManager):
def _report_driver_status(self, context):
LOG.info(_("Updating fake volume status"))
fake_location_info = 'LVMVolumeDriver:Huawei:cinder-volumes:default:0'
volume_stats = {'QoS_support': False,
'location_info': fake_location_info,
'volume_backend_name': 'LVM_iSCSI',
'free_capacity_gb': 1024,
'driver_version': '2.0.0',
'total_capacity_gb': 1024,
'reserved_percentage': 0,
'vendor_name': 'Open Source',
'storage_protocol': 'iSCSI'
}
volume_stats = {
'pools': [{
'pool_name': 'LVM_iSCSI',
'QoS_support': False,
'free_capacity_gb': 10240.0,
'location_info': fake_location_info,
'total_capacity_gb': 10240.0,
'reserved_percentage': 0
}],
'driver_version': '2.0.0',
'vendor_name': 'OpenSource',
'volume_backend_name': 'LVM_iSCSI',
'storage_protocol': 'iSCSI'}
self.update_service_capabilities(volume_stats)
def publish_service_capabilities(self, context):
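The `_report_driver_status` hunk above replaces the flat capability dictionary with a pool-aware report, the shape the Juno pool-aware scheduler consumes. The two self-contained dictionaries below contrast the old and new layouts; the values are illustrative only.
```
# Illustrative only: flat (pre-Juno) vs. pool-aware capability reports.
flat_stats = {
    'volume_backend_name': 'LVM_iSCSI',
    'total_capacity_gb': 1024,
    'free_capacity_gb': 1024,
    'QoS_support': False,
    'reserved_percentage': 0,
}

pool_aware_stats = {
    'volume_backend_name': 'LVM_iSCSI',
    'vendor_name': 'OpenSource',
    'driver_version': '2.0.0',
    'storage_protocol': 'iSCSI',
    'pools': [{                      # per-pool capacities live here now
        'pool_name': 'LVM_iSCSI',
        'total_capacity_gb': 10240.0,
        'free_capacity_gb': 10240.0,
        'QoS_support': False,
        'reserved_percentage': 0,
    }],
}
```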

View File

@@ -19,7 +19,7 @@ _CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder"
_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log"
# please set the option list set in cinder configure file
_CINDER_CONF_OPTION=("volume_manager=cinder.volume.cinder_proxy.CinderProxy volume_sync_interval=5 periodic_interval=5 cinder_tenant_name=admin cinder_username=admin cinder_password=1234 keystone_auth_url=http://10.67.148.210:5000/v2.0/ glance_cascading_flag=False cascading_glance_url=10.67.148.210:9292 cascaded_glance_url=http://10.67.148.201:9292 cascaded_cinder_url=http://10.67.148.201:8776/v2/%(project_id)s cascaded_region_name=Region_AZ1 cascaded_available_zone=AZ1")
_CINDER_CONF_OPTION=("volume_manager=cinder.volume.cinder_proxy.CinderProxy volume_sync_interval=5 voltype_sync_interval=3600 periodic_interval=5 cinder_tenant_name=admin cinder_username=admin cinder_password=1234 keystone_auth_url=http://10.67.148.210:5000/v2.0/ glance_cascading_flag=False cascading_glance_url=10.67.148.210:9292 cascaded_glance_url=http://10.67.148.201:9292 cascaded_cinder_url=http://10.67.148.201:8776/v2/%(project_id)s cascaded_region_name=Region_AZ1 cascaded_available_zone=AZ1")
# if you did not make changes to the installation files,
# please do not edit the following directories.

View File

@@ -130,7 +130,7 @@ Besides glance-api.conf file, we add some new config files. They are described s
#will sync to. (e.g. physicalOpenstack001, physicalOpenstack002)
snapshot_region_names =
- Last but also important, we add a yaml file for config the store backend's copy : glance_store.yaml in cascading glance.
- Last but not least, we add a yaml file for config the store backend's copy : glance_store.yaml in cascading glance.
These configs correspond to the various store schemes (at present, only filesystem is supported). The values
depend on your environment, so configure them before installation, or restart the glance-sync service after
modifying them.
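The glance_store.yaml mentioned above is not shown in this hunk, so its exact schema is unknown here. As a rough sketch only, reading such a per-scheme mapping with PyYAML might look like the following; the path and key layout are assumptions, not the project's actual format.
```
# Hypothetical reader for glance_store.yaml; the real schema is not shown
# in this diff, so the path and per-scheme layout here are placeholders.
import yaml

with open('/etc/glance/glance_store.yaml') as f:   # assumed location
    store_cfg = yaml.safe_load(f) or {}

for scheme, options in store_cfg.items():          # e.g. 'filesystem': {...}
    print(scheme, options)
```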

View File

@@ -8,6 +8,9 @@ bind_host = 0.0.0.0
# Port to bind the API server to
bind_port = 9595
#worker number
workers = 3
# Log to this file. Make sure you do not set the same log file for both the API
# and registry servers!
#
@@ -21,7 +24,7 @@ backlog = 4096
#How to sync the image; the value can be ["None", "ALL", "USER"]
#When "ALL" is chosen, sync to all the cascaded glances;
#When "USER" is chosen, sync according to the user's role, project, etc.
sync_strategy = None
sync_strategy = All
#The cascading glance endpoint.
cascading_endpoint_url = http://127.0.0.1:9292/
@@ -43,7 +46,7 @@ scp_copy_timeout = 3600
#When snapshotting, one can set the specific regions to which the snapshot
#will be synced.
snapshot_region_names = physicalOpenstack001, physicalOpenstack002
snapshot_region_names = CascadedOne, CascadedTwo
[keystone_authtoken]
auth_host = 127.0.0.1
@@ -51,7 +54,7 @@ auth_port = 35357
auth_protocol = http
admin_tenant_name = admin
admin_user = glance
admin_password = glance
admin_password = openstack
[paste_deploy]
config_file = /etc/glance/glance-sync-paste.ini
flavor=keystone

View File

@@ -23,8 +23,7 @@ import eventlet
import os
import sys
from oslo.config import cfg
from glance.common import utils
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
@@ -43,6 +42,11 @@ from glance.openstack.common import log
import glance.sync
def fail(returncode, e):
sys.stderr.write("ERROR: %s\n" % utils.exception_to_str(e))
sys.exit(returncode)
def main():
try:
config.parse_args(default_config_files='glance-sync.conf')
@@ -51,8 +55,10 @@ def main():
server = wsgi.Server()
server.start(config.load_paste_app('glance-sync'), default_port=9595)
server.wait()
except exception.WorkerCreationFailure as e:
fail(2, e)
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
fail(1, e)
if __name__ == '__main__':
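The launcher change above routes start-up failures through a small fail() helper so that worker-creation problems and other runtime errors exit with distinct return codes. Below is a self-contained sketch of that pattern; the exception class and serve() function are placeholders, not glance APIs.
```
# Sketch of the exit-code pattern used by the glance-sync launcher above;
# WorkerCreationFailure and serve() are stand-ins, not glance code.
import sys


class WorkerCreationFailure(Exception):
    """Stand-in for glance.common.exception.WorkerCreationFailure."""


def serve():
    raise RuntimeError("could not bind to port 9595")


def fail(returncode, e):
    sys.stderr.write("ERROR: %s\n" % e)
    sys.exit(returncode)


if __name__ == '__main__':
    try:
        serve()
    except WorkerCreationFailure as e:
        fail(2, e)   # worker creation problems -> exit code 2
    except RuntimeError as e:
        fail(1, e)   # any other runtime error  -> exit code 1
```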

View File

@@ -26,10 +26,7 @@ from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from glance.common import exception
from glance.common import utils
from glance.openstack.common import importutils
from glance.openstack.common import jsonutils
from glance.openstack.common import threadgroup
from glance.openstack.common import timeutils
import glance.openstack.common.log as logging

View File

@@ -1,21 +0,0 @@
[console_scripts]
glance-api = glance.cmd.api:main
glance-cache-cleaner = glance.cmd.cache_cleaner:main
glance-cache-manage = glance.cmd.cache_manage:main
glance-cache-prefetcher = glance.cmd.cache_prefetcher:main
glance-cache-pruner = glance.cmd.cache_pruner:main
glance-control = glance.cmd.control:main
glance-manage = glance.cmd.manage:main
glance-registry = glance.cmd.registry:main
glance-replicator = glance.cmd.replicator:main
glance-scrubber = glance.cmd.scrubber:main
[glance.common.image_location_strategy.modules]
location_order_strategy = glance.common.location_strategy.location_order
store_type_strategy = glance.common.location_strategy.store_type
[glance.sync.store.location]
filesystem = glance.sync.store._drivers.filesystem:LocationCreator
[glance.sync.store.driver]
filesystem = glance.sync.store._drivers.filesystem:Store
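The deleted entry_points listing above is what wires the glance console scripts and the sync store plugins together; such groups are discovered at runtime through standard setuptools entry points. The snippet below is a generic illustration of that lookup, not code from this tree; it prints nothing if no matching packages are installed.
```
# Generic setuptools lookup for the plugin groups listed above.
import pkg_resources

for group in ('glance.sync.store.driver', 'glance.sync.store.location'):
    for ep in pkg_resources.iter_entry_points(group):
        print(group, ep.name, '->', ep.module_name)
```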

View File

@@ -1,814 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import re
import sys
from oslo.config import cfg
import six
from glance.common import exception
from glance.common import utils
import glance.context
import glance.domain.proxy
from glance.openstack.common import importutils
import glance.openstack.common.log as logging
from glance import scrubber
from glance.store import location
LOG = logging.getLogger(__name__)
store_opts = [
cfg.ListOpt('known_stores',
default=[
'glance.store.filesystem.Store',
'glance.store.http.Store'
],
help=_('List of which store classes and store class locations '
'are currently known to glance at startup.')),
cfg.StrOpt('default_store', default='file',
help=_("Default scheme to use to store image data. The "
"scheme must be registered by one of the stores "
"defined by the 'known_stores' config option.")),
cfg.StrOpt('scrubber_datadir',
default='/var/lib/glance/scrubber',
help=_('Directory that the scrubber will use to track '
'information about what to delete. '
'Make sure this is set in glance-api.conf and '
'glance-scrubber.conf.')),
cfg.BoolOpt('delayed_delete', default=False,
help=_('Turn on/off delayed delete.')),
cfg.BoolOpt('use_user_token', default=True,
help=_('Whether to pass through the user token when '
'making requests to the registry.')),
cfg.IntOpt('scrub_time', default=0,
help=_('The amount of time in seconds to delay before '
'performing a delete.')),
]
REGISTERED_STORES = set()
CONF = cfg.CONF
CONF.register_opts(store_opts)
_ALL_STORES = [
'glance.store.filesystem.Store',
'glance.store.http.Store',
'glance.store.rbd.Store',
'glance.store.s3.Store',
'glance.store.swift.Store',
'glance.store.sheepdog.Store',
'glance.store.cinder.Store',
'glance.store.gridfs.Store',
'glance.store.vmware_datastore.Store'
]
class BackendException(Exception):
pass
class UnsupportedBackend(BackendException):
pass
class Indexable(object):
"""
Wrapper that allows an iterator or filelike be treated as an indexable
data structure. This is required in the case where the return value from
Store.get() is passed to Store.add() when adding a Copy-From image to a
Store where the client library relies on eventlet GreenSockets, in which
case the data to be written is indexed over.
"""
def __init__(self, wrapped, size):
"""
Initialize the object
:param wrappped: the wrapped iterator or filelike.
:param size: the size of data available
"""
self.wrapped = wrapped
self.size = int(size) if size else (wrapped.len
if hasattr(wrapped, 'len') else 0)
self.cursor = 0
self.chunk = None
def __iter__(self):
"""
Delegate iteration to the wrapped instance.
"""
for self.chunk in self.wrapped:
yield self.chunk
def __getitem__(self, i):
"""
Index into the next chunk (or previous chunk in the case where
the last data returned was not fully consumed).
:param i: a slice-to-the-end
"""
start = i.start if isinstance(i, slice) else i
if start < self.cursor:
return self.chunk[(start - self.cursor):]
self.chunk = self.another()
if self.chunk:
self.cursor += len(self.chunk)
return self.chunk
def another(self):
"""Implemented by subclasses to return the next element"""
raise NotImplementedError
def getvalue(self):
"""
Return entire string value... used in testing
"""
return self.wrapped.getvalue()
def __len__(self):
"""
Length accessor.
"""
return self.size
def _register_stores(store_classes):
"""
Given a set of store names, add them to a globally available set
of store names.
"""
for store_cls in store_classes:
REGISTERED_STORES.add(store_cls.__module__.split('.')[2])
# NOTE (spredzy): The actual class name is filesystem but in order
# to maintain backward compatibility we need to keep the 'file' store
# as a known store
if 'filesystem' in REGISTERED_STORES:
REGISTERED_STORES.add('file')
def _get_store_class(store_entry):
store_cls = None
try:
LOG.debug("Attempting to import store %s", store_entry)
store_cls = importutils.import_class(store_entry)
except exception.NotFound:
raise BackendException('Unable to load store. '
'Could not find a class named %s.'
% store_entry)
return store_cls
def create_stores():
"""
Registers all store modules and all schemes
from the given config. Duplicates are not re-registered.
"""
store_count = 0
store_classes = set()
for store_entry in set(CONF.known_stores + _ALL_STORES):
store_entry = store_entry.strip()
if not store_entry:
continue
store_cls = _get_store_class(store_entry)
try:
store_instance = store_cls()
except exception.BadStoreConfiguration as e:
if store_entry in CONF.known_stores:
LOG.warn(_("%s Skipping store driver.") % unicode(e))
continue
finally:
# NOTE(flaper87): To be removed in Juno
if store_entry not in CONF.known_stores:
LOG.deprecated(_("%s not found in `known_store`. "
"Stores need to be explicitly enabled in "
"the configuration file.") % store_entry)
schemes = store_instance.get_schemes()
if not schemes:
raise BackendException('Unable to register store %s. '
'No schemes associated with it.'
% store_cls)
else:
if store_cls not in store_classes:
LOG.debug("Registering store %s with schemes %s",
store_cls, schemes)
store_classes.add(store_cls)
scheme_map = {}
for scheme in schemes:
loc_cls = store_instance.get_store_location_class()
scheme_map[scheme] = {
'store_class': store_cls,
'location_class': loc_cls,
}
location.register_scheme_map(scheme_map)
store_count += 1
else:
LOG.debug("Store %s already registered", store_cls)
_register_stores(store_classes)
return store_count
def verify_default_store():
scheme = cfg.CONF.default_store
context = glance.context.RequestContext()
try:
get_store_from_scheme(context, scheme)
except exception.UnknownScheme:
msg = _("Store for scheme %s not found") % scheme
raise RuntimeError(msg)
def get_known_schemes():
"""Returns list of known schemes"""
return location.SCHEME_TO_CLS_MAP.keys()
def get_known_stores():
"""Returns list of known stores"""
return list(REGISTERED_STORES)
def get_store_from_scheme(context, scheme, loc=None):
"""
Given a scheme, return the appropriate store object
for handling that scheme.
"""
if scheme not in location.SCHEME_TO_CLS_MAP:
raise exception.UnknownScheme(scheme=scheme)
scheme_info = location.SCHEME_TO_CLS_MAP[scheme]
store = scheme_info['store_class'](context, loc)
return store
def get_store_from_uri(context, uri, loc=None):
"""
Given a URI, return the store object that would handle
operations on the URI.
:param uri: URI to analyze
"""
scheme = uri[0:uri.find('/') - 1]
store = get_store_from_scheme(context, scheme, loc)
return store
def get_from_backend(context, uri, **kwargs):
"""Yields chunks of data from backend specified by uri"""
loc = location.get_location_from_uri(uri)
store = get_store_from_uri(context, uri, loc)
try:
return store.get(loc)
except NotImplementedError:
raise exception.StoreGetNotSupported
def get_size_from_backend(context, uri):
"""Retrieves image size from backend specified by uri"""
if utils.is_glance_location(uri):
uri += ('?auth_token=' + context.auth_tok)
loc = location.get_location_from_uri(uri)
store = get_store_from_uri(context, uri, loc)
return store.get_size(loc)
def _check_glance_loc(context, location):
uri = location['url']
if not utils.is_glance_location(uri):
return False
if 'auth_token=' in uri:
return True
location['url'] = uri + ('?auth_token=' + context.auth_tok)
return True
def delete_from_backend(context, uri, **kwargs):
"""Removes chunks of data from backend specified by uri"""
loc = location.get_location_from_uri(uri)
store = get_store_from_uri(context, uri, loc)
try:
return store.delete(loc)
except NotImplementedError:
raise exception.StoreDeleteNotSupported
def get_store_from_location(uri):
"""
Given a location (assumed to be a URL), attempt to determine
the store from the location. We use here a simple guess that
the scheme of the parsed URL is the store...
:param uri: Location to check for the store
"""
loc = location.get_location_from_uri(uri)
return loc.store_name
def safe_delete_from_backend(context, uri, image_id, **kwargs):
"""Given a uri, delete an image from the store."""
try:
return delete_from_backend(context, uri, **kwargs)
except exception.NotFound:
msg = _('Failed to delete image %s in store from URI')
LOG.warn(msg % image_id)
except exception.StoreDeleteNotSupported as e:
LOG.warn(six.text_type(e))
except UnsupportedBackend:
exc_type = sys.exc_info()[0].__name__
msg = (_('Failed to delete image %(image_id)s from store '
'(%(error)s)') % {'image_id': image_id,
'error': exc_type})
LOG.error(msg)
def schedule_delayed_delete_from_backend(context, uri, image_id, **kwargs):
"""Given a uri, schedule the deletion of an image location."""
(file_queue, _db_queue) = scrubber.get_scrub_queues()
# NOTE(zhiyan): Defautly ask glance-api store using file based queue.
# In future we can change it using DB based queued instead,
# such as using image location's status to saving pending delete flag
# when that property be added.
if CONF.use_user_token is False:
context = None
file_queue.add_location(image_id, uri, user_context=context)
def delete_image_from_backend(context, store_api, image_id, uri):
if CONF.delayed_delete:
store_api.schedule_delayed_delete_from_backend(context, uri, image_id)
else:
store_api.safe_delete_from_backend(context, uri, image_id)
def check_location_metadata(val, key=''):
if isinstance(val, dict):
for key in val:
check_location_metadata(val[key], key=key)
elif isinstance(val, list):
ndx = 0
for v in val:
check_location_metadata(v, key='%s[%d]' % (key, ndx))
ndx = ndx + 1
elif not isinstance(val, unicode):
raise BackendException(_("The image metadata key %(key)s has an "
"invalid type of %(val)s. Only dict, list, "
"and unicode are supported.") %
{'key': key,
'val': type(val)})
def store_add_to_backend(image_id, data, size, store):
"""
A wrapper around a call to each stores add() method. This gives glance
a common place to check the output
:param image_id: The image add to which data is added
:param data: The data to be stored
:param size: The length of the data in bytes
:param store: The store to which the data is being added
:return: The url location of the file,
the size amount of data,
the checksum of the data
the storage systems metadata dictionary for the location
"""
(location, size, checksum, metadata) = store.add(image_id, data, size)
if metadata is not None:
if not isinstance(metadata, dict):
msg = (_("The storage driver %(store)s returned invalid metadata "
"%(metadata)s. This must be a dictionary type") %
{'store': six.text_type(store),
'metadata': six.text_type(metadata)})
LOG.error(msg)
raise BackendException(msg)
try:
check_location_metadata(metadata)
except BackendException as e:
e_msg = (_("A bad metadata structure was returned from the "
"%(store)s storage driver: %(metadata)s. %(error)s.") %
{'store': six.text_type(store),
'metadata': six.text_type(metadata),
'error': six.text_type(e)})
LOG.error(e_msg)
raise BackendException(e_msg)
return (location, size, checksum, metadata)
def add_to_backend(context, scheme, image_id, data, size):
store = get_store_from_scheme(context, scheme)
try:
return store_add_to_backend(image_id, data, size, store)
except NotImplementedError:
raise exception.StoreAddNotSupported
def set_acls(context, location_uri, public=False, read_tenants=[],
write_tenants=[]):
loc = location.get_location_from_uri(location_uri)
scheme = get_store_from_location(location_uri)
store = get_store_from_scheme(context, scheme, loc)
try:
store.set_acls(loc, public=public, read_tenants=read_tenants,
write_tenants=write_tenants)
except NotImplementedError:
LOG.debug(_("Skipping store.set_acls... not implemented."))
class ImageRepoProxy(glance.domain.proxy.Repo):
def __init__(self, image_repo, context, store_api):
self.context = context
self.store_api = store_api
proxy_kwargs = {'context': context, 'store_api': store_api}
super(ImageRepoProxy, self).__init__(image_repo,
item_proxy_class=ImageProxy,
item_proxy_kwargs=proxy_kwargs)
def _set_acls(self, image):
public = image.visibility == 'public'
member_ids = []
if image.locations and not public:
member_repo = image.get_member_repo()
member_ids = [m.member_id for m in member_repo.list()]
for location in image.locations:
self.store_api.set_acls(self.context, location['url'], public,
read_tenants=member_ids)
def add(self, image):
result = super(ImageRepoProxy, self).add(image)
self._set_acls(image)
return result
def save(self, image):
result = super(ImageRepoProxy, self).save(image)
self._set_acls(image)
return result
def _check_location_uri(context, store_api, uri):
"""
Check if an image location uri is valid.
:param context: Glance request context
:param store_api: store API module
:param uri: location's uri string
"""
is_ok = True
try:
size = store_api.get_size_from_backend(context, uri)
# NOTE(zhiyan): Some stores return zero when it catch exception
is_ok = size > 0
except (exception.UnknownScheme, exception.NotFound):
is_ok = False
if not is_ok:
raise exception.BadStoreUri(_('Invalid location: %s') % uri)
def _check_image_location(context, store_api, location):
if not _check_glance_loc(context, location):
_check_location_uri(context, store_api, location['url'])
store_api.check_location_metadata(location['metadata'])
def _remove_extra_info(location):
url = location['url']
if url.startswith('http'):
start = url.find('auth_token')
if start == -1:
return
end = url.find('&', start)
if end == -1:
if url[start - 1] == '?':
url = re.sub(r'\?auth_token=\S+', r'', url)
elif url[start - 1] == '&':
url = re.sub(r'&auth_token=\S+', r'', url)
else:
url = re.sub(r'auth_token=\S+&', r'', url)
location['url'] = url
def _set_image_size(context, image, locations):
if not image.size:
for location in locations:
size_from_backend = glance.store.get_size_from_backend(
context, location['url'])
if size_from_backend:
# NOTE(flwang): This assumes all locations have the same size
image.size = size_from_backend
break
class ImageFactoryProxy(glance.domain.proxy.ImageFactory):
def __init__(self, factory, context, store_api):
self.context = context
self.store_api = store_api
proxy_kwargs = {'context': context, 'store_api': store_api}
super(ImageFactoryProxy, self).__init__(factory,
proxy_class=ImageProxy,
proxy_kwargs=proxy_kwargs)
def new_image(self, **kwargs):
locations = kwargs.get('locations', [])
for l in locations:
_check_image_location(self.context, self.store_api, l)
if locations.count(l) > 1:
raise exception.DuplicateLocation(location=l['url'])
return super(ImageFactoryProxy, self).new_image(**kwargs)
class StoreLocations(collections.MutableSequence):
"""
The proxy for store location property. It takes responsibility for:
1. Location uri correctness checking when adding a new location.
2. Remove the image data from the store when a location is removed
from an image.
"""
def __init__(self, image_proxy, value):
self.image_proxy = image_proxy
if isinstance(value, list):
self.value = value
else:
self.value = list(value)
def append(self, location):
# NOTE(flaper87): Insert this
# location at the very end of
# the value list.
self.insert(len(self.value), location)
def extend(self, other):
if isinstance(other, StoreLocations):
locations = other.value
else:
locations = list(other)
for location in locations:
self.append(location)
def insert(self, i, location):
_check_image_location(self.image_proxy.context,
self.image_proxy.store_api, location)
_remove_extra_info(location)
if location in self.value:
raise exception.DuplicateLocation(location=location['url'])
self.value.insert(i, location)
_set_image_size(self.image_proxy.context,
self.image_proxy,
[location])
def pop(self, i=-1):
location = self.value.pop(i)
try:
delete_image_from_backend(self.image_proxy.context,
self.image_proxy.store_api,
self.image_proxy.image.image_id,
location['url'])
except Exception:
self.value.insert(i, location)
raise
return location
def count(self, location):
return self.value.count(location)
def index(self, location, *args):
return self.value.index(location, *args)
def remove(self, location):
if self.count(location):
self.pop(self.index(location))
else:
self.value.remove(location)
def reverse(self):
self.value.reverse()
# Mutable sequence, so not hashable
__hash__ = None
def __getitem__(self, i):
return self.value.__getitem__(i)
def __setitem__(self, i, location):
_check_image_location(self.image_proxy.context,
self.image_proxy.store_api, location)
self.value.__setitem__(i, location)
_set_image_size(self.image_proxy.context,
self.image_proxy,
[location])
def __delitem__(self, i):
location = None
try:
location = self.value.__getitem__(i)
except Exception:
return self.value.__delitem__(i)
delete_image_from_backend(self.image_proxy.context,
self.image_proxy.store_api,
self.image_proxy.image.image_id,
location['url'])
self.value.__delitem__(i)
def __delslice__(self, i, j):
i = max(i, 0)
j = max(j, 0)
locations = []
try:
locations = self.value.__getslice__(i, j)
except Exception:
return self.value.__delslice__(i, j)
for location in locations:
delete_image_from_backend(self.image_proxy.context,
self.image_proxy.store_api,
self.image_proxy.image.image_id,
location['url'])
self.value.__delitem__(i)
def __iadd__(self, other):
self.extend(other)
return self
def __contains__(self, location):
return location in self.value
def __len__(self):
return len(self.value)
def __cast(self, other):
if isinstance(other, StoreLocations):
return other.value
else:
return other
def __cmp__(self, other):
return cmp(self.value, self.__cast(other))
def __iter__(self):
return iter(self.value)
def __copy__(self):
return type(self)(self.image_proxy, self.value)
def __deepcopy__(self, memo):
# NOTE(zhiyan): Only copy location entries, others can be reused.
value = copy.deepcopy(self.value, memo)
self.image_proxy.image.locations = value
return type(self)(self.image_proxy, value)
def _locations_proxy(target, attr):
"""
Make a location property proxy on the image object.
:param target: the image object on which to add the proxy
:param attr: the property proxy we want to hook
"""
def get_attr(self):
value = getattr(getattr(self, target), attr)
return StoreLocations(self, value)
def set_attr(self, value):
if not isinstance(value, (list, StoreLocations)):
raise exception.BadStoreUri(_('Invalid locations: %s') % value)
ori_value = getattr(getattr(self, target), attr)
if ori_value != value:
# NOTE(zhiyan): Enforced locations list was previously empty list.
if len(ori_value) > 0:
raise exception.Invalid(_('Original locations is not empty: '
'%s') % ori_value)
# NOTE(zhiyan): Check locations are all valid.
for location in value:
_check_image_location(self.context, self.store_api,
location)
if value.count(location) > 1:
raise exception.DuplicateLocation(location=location['url'])
_set_image_size(self.context, getattr(self, target), value)
return setattr(getattr(self, target), attr, list(value))
def del_attr(self):
value = getattr(getattr(self, target), attr)
while len(value):
delete_image_from_backend(self.context, self.store_api,
self.image.image_id, value[0]['url'])
del value[0]
setattr(getattr(self, target), attr, value)
return delattr(getattr(self, target), attr)
return property(get_attr, set_attr, del_attr)
pattern = re.compile(r'^https?://\S+/v2/images/\S+$')
class ImageProxy(glance.domain.proxy.Image):
locations = _locations_proxy('image', 'locations')
def __init__(self, image, context, store_api):
self.image = image
self.context = context
self.store_api = store_api
proxy_kwargs = {
'context': context,
'image': self,
'store_api': store_api,
}
super(ImageProxy, self).__init__(
image, member_repo_proxy_class=ImageMemberRepoProxy,
member_repo_proxy_kwargs=proxy_kwargs)
def delete(self):
self.image.delete()
if self.image.locations:
for location in self.image.locations:
self.store_api.delete_image_from_backend(self.context,
self.store_api,
self.image.image_id,
location['url'])
def set_data(self, data, size=None):
if size is None:
size = 0 # NOTE(markwash): zero -> unknown size
location, size, checksum, loc_meta = self.store_api.add_to_backend(
self.context, CONF.default_store,
self.image.image_id, utils.CooperativeReader(data), size)
loc_meta = loc_meta or {}
loc_meta['is_default'] = 'true'
self.image.locations = [{'url': location, 'metadata': loc_meta}]
self.image.size = size
self.image.checksum = checksum
self.image.status = 'active'
def get_data(self):
if not self.image.locations:
raise exception.NotFound(_("No image data could be found"))
err = None
for loc in self.image.locations:
if pattern.match(loc['url']):
continue
try:
data, size = self.store_api.get_from_backend(self.context,
loc['url'])
return data
except Exception as e:
LOG.warn(_('Get image %(id)s data failed: '
'%(err)s.') % {'id': self.image.image_id,
'err': six.text_type(e)})
err = e
# tried all locations
LOG.error(_('Glance tried all locations to get data for image %s '
'but all have failed.') % self.image.image_id)
raise err
class ImageMemberRepoProxy(glance.domain.proxy.Repo):
def __init__(self, repo, image, context, store_api):
self.repo = repo
self.image = image
self.context = context
self.store_api = store_api
super(ImageMemberRepoProxy, self).__init__(repo)
def _set_acls(self):
public = self.image.visibility == 'public'
if self.image.locations and not public:
member_ids = [m.member_id for m in self.repo.list()]
for location in self.image.locations:
self.store_api.set_acls(self.context, location['url'], public,
read_tenants=member_ids)
def add(self, member):
super(ImageMemberRepoProxy, self).add(member)
self._set_acls()
def remove(self, member):
super(ImageMemberRepoProxy, self).remove(member)
self._set_acls()
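One detail worth noting in the removed module above: get_store_from_uri() derives the store scheme by slicing the URI up to one character before the first '/'. A tiny standalone demonstration of that slice:
```
# Standalone illustration of the scheme slicing used by the removed
# get_store_from_uri(); not glance code, just the same expression.
def scheme_of(uri):
    return uri[0:uri.find('/') - 1]

print(scheme_of('file:///var/lib/glance/images/0001'))   # -> file
print(scheme_of('http://server:9292/v2/images/0001'))    # -> http
```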

View File

@@ -1,165 +0,0 @@
OpenStack Neutron DVR patch
===============================
To solve the scalability problem in OpenStack Neutron deployments and to distribute the network-node load across compute nodes, a solution named DVR (Distributed Virtual Router) was proposed. DVR addresses both problems while fitting into the existing model.
The DVR feature code has been merged into the neutron master branch and is expected in the Neutron Juno release. This patch was downloaded from the DVR branch on 1st June.
Key modules
-----------
* L2 Agent Doc
https://docs.google.com/document/d/1depasJSnGZPOnRLxEC_PYsVLcGVFXZLqP52RFTe21BE/edit#heading=h.5w7clq272tji
* L3 Agent Doc
https://docs.google.com/document/d/1jCmraZGirmXq5V1MtRqhjdZCbUfiwBhRkUjDXGt5QUQ/edit
Addressed by: https://review.openstack.org/84223
* Add L3 Extension for Distributed Routers
Addressed by: https://review.openstack.org/87730
* L2 Agent/ML2 Plugin changes for L3 DVR
Addressed by: https://review.openstack.org/88442
* Add 'ip neigh' to ip_lib
Addressed by: https://review.openstack.org/89413
* Modify L3 Agent for Distributed Routers
Addressed by: https://review.openstack.org/89694
* Add L3 Scheduler Changes for Distributed Routers
Addressed by: https://review.openstack.org/93233
* Add 'ip rule add from' to ip_lib
Addressed by: https://review.openstack.org/96389
* Addressed merge conflict
Addressed by: https://review.openstack.org/97028
* Refactor some router-related methods
Addressed by: https://review.openstack.org/97275
* Allow L3 base to handle extensions on router creation
Addressed by: https://review.openstack.org/102101
* L2 Model additions to support DVR
Addressed by: https://review.openstack.org/102332
* RPC additions to support DVR
Addressed by: https://review.openstack.org/102398
* ML2 additions to support DVR
Requirements
------------
* openstack-neutron-server-2014.1-1.1 has been installed
* oslo.db-0.2.0 has been installed
* sqlalchemy-migrate-0.9.1 has been installed
Installation
------------
We provide two ways to install the DVR patch code. In this section, we will guide you through installing the neutron DVR code with the minimum configuration.
* **Note:**
- Make sure you have an existing installation of **OpenStack Icehouse**.
- We recommend that you back up at least the following files before installation, because they will be overwritten or modified:
$NEUTRON_CONFIG_PARENT_DIR/neutron.conf
(replace the $... with actual directory names.)
* **Manual Installation**
- Navigate to the local repository and copy the contents in 'neutron' sub-directory to the corresponding places in existing neutron, e.g.
```cp -r $LOCAL_REPOSITORY_DIR/neutron $NEUTRON_PARENT_DIR```
(replace the $... with actual directory name.)
- Navigate to the local repository and copy the contents in 'etc' sub-directory to the corresponding places in existing neutron, e.g.
```cp -r $LOCAL_REPOSITORY_DIR/etc $NEUTRON_CONFIG_DIR```
(replace the $... with actual directory name.)
- Update the neutron configuration file (e.g. /etc/neutron/l3_agent.ini, /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini) with the minimum option below. If the option already exists, modify its value, otherwise add it to the config file. Check the "Configurations" section below for a full configuration guide.
1)update l3 agent configurations(/etc/neutron/l3_agent.ini)
```
[DEFAULT]
...
distributed_agent=True
```
2)update openvswitch agent configurations(/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini)
```
[AGENT]
...
enable_distributed_routing = True
```
- Remove the neutron DB
- Create the neutron DB
```neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head```
- Restart the neutron-server/openvswitch-agent/l3-agent.
```service openstack-neutron restart```
```service openstack-neutron-openvswitch-agent restart```
```service openstack-neutron-l3-agent restart```
- Done.
* **Automatic Installation**
- Navigate to the installation directory and run installation script.
```
cd $LOCAL_REPOSITORY_DIR/installation
sudo bash ./install.sh
```
(replace the $... with actual directory name.)
- Done. The installation script sets up the DVR code but does not modify the minimum configuration; check the "Configurations" section for a full configuration guide.
1)update l3 agent configurations(/etc/neutron/l3_agent.ini)
```
[DEFAULT]
...
distributed_agent=True
```
2)update openvswitch agent configurations(/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini)
```
[AGENT]
...
enable_distributed_routing = True
```
* **Troubleshooting**
In case the automatic installation does not complete, please check the following:
- Make sure your OpenStack version is Icehouse.
- Check the variables in the beginning of the install.sh scripts. Your installation directories may be different from the default values we provide.
- The installation script automatically adds the related code to $NEUTRON_PARENT_DIR/nova but does not modify the related configuration; you should update the configuration manually.
- In case the automatic installation does not work, try to install manually.
Configurations
--------------
* This is a (default) configuration sample for the DVR-related agents. Please add/modify these options in (/etc/neutron/l3_agent.ini, /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini).
* Note:
- Please carefully make sure that options in the configuration file are not duplicated. If an option name already exists, modify its value instead of adding a new one of the same name.
- Please refer to the 'Configuration Details' section below for proper configuration and usage of costs and constraints.
1)add or update l3 agent configurations(/etc/neutron/l3_agent.ini)
```
[DEFAULT]
...
#Enables distributed router agent function
distributed_agent=True
```
2)add or update openvswitch agent configurations(/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini)
```
[AGENT]
...
#Make the l2 agent run in dvr mode
enable_distributed_routing = True
```

View File

@@ -1,148 +0,0 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.
_MYSQL_PASS="Galax8800"
_NEUTRON_CONF_DIR="/etc/neutron"
_NEUTRON_CONF_FILE='neutron.conf'
_NEUTRON_INSTALL="/usr/lib64/python2.6/site-packages"
_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron"
_NEUTRON_L2_CONFIG_FILE='/plugins/openvswitch/ovs_neutron_plugin.ini'
_NEUTRON_L3_CONFIG_FILE='l3_agent.ini'
# if you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../neutron/"
_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-dvr-code-installation-backup"
l2_config_option_list="\[AGENT\]:firewall_driver=neutron.agent.firewall.NoopFirewallDriver \[SECURITYGROUP\]:enable_distributed_routing=True"
l3_config_option_list="\[DEFAULT\]:distributed_agent=True"
#_SCRIPT_NAME="${0##*/}"
#_SCRIPT_LOGFILE="/var/log/neutron-dvr-code/installation/${_SCRIPT_NAME}.log"
if [[ ${EUID} -ne 0 ]]; then
echo "Please run as root."
exit 1
fi
##Redirecting output to logfile as well as stdout
#exec > >(tee -a ${_SCRIPT_LOGFILE})
#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2)
cd `dirname $0`
echo "checking installation directories..."
if [ ! -d "${_NEUTRON_DIR}" ] ; then
echo "Could not find the neutron installation. Please check the variables in the beginning of the script."
echo "aborted."
exit 1
fi
if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}"